src/hotspot/share/memory/metaspaceShared.cpp
changeset 54927 1512d88b24c6
parent 54847 59ea39bb2809
child 55251 92eec0732eed
child 58678 9cf78a70fa4f
comparing 54926:d4e7ccaf1445 with 54927:1512d88b24c6

--- a/src/hotspot/share/memory/metaspaceShared.cpp
+++ b/src/hotspot/share/memory/metaspaceShared.cpp
@@ -46,14 +46,16 @@
 #include "memory/metaspace.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "memory/dynamicArchive.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
+#include "oops/methodData.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayKlass.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
@@ -79,10 +81,11 @@
 bool MetaspaceShared::_archive_loading_failed = false;
 bool MetaspaceShared::_remapped_readwrite = false;
 address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
 size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
 size_t MetaspaceShared::_core_spaces_size = 0;
+void* MetaspaceShared::_shared_metaspace_static_top = NULL;
 
 // The CDS archive is divided into the following regions:
 //     mc  - misc code (the method entry trampolines)
 //     rw  - read-write metadata
 //     ro  - read-only metadata and read-only tables
@@ -110,109 +113,101 @@
 // [6] Original class files are copied into the od region.
 //
 // The s0/s1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
 // Their layout is independent of the other 5 regions.
 
-class DumpRegion {
-private:
-  const char* _name;
-  char* _base;
-  char* _top;
-  char* _end;
-  bool _is_packed;
-
-  char* expand_top_to(char* newtop) {
-    assert(is_allocatable(), "must be initialized and not packed");
-    assert(newtop >= _top, "must not grow backwards");
-    if (newtop > _end) {
-      MetaspaceShared::report_out_of_space(_name, newtop - _top);
-      ShouldNotReachHere();
-    }
-    uintx delta = MetaspaceShared::object_delta_uintx(newtop);
-    if (delta > MAX_SHARED_DELTA) {
-      // This is just a sanity check and should not appear in any real world usage. This
-      // happens only if you allocate more than 2GB of shared objects and would require
-      // millions of shared classes.
-      vm_exit_during_initialization("Out of memory in the CDS archive",
-                                    "Please reduce the number of shared classes.");
-    }
-
-    MetaspaceShared::commit_shared_space_to(newtop);
-    _top = newtop;
-    return _top;
-  }
-
-public:
-  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
-
-  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord) {
-    char* p = (char*)align_up(_top, alignment);
-    char* newtop = p + align_up(num_bytes, alignment);
-    expand_top_to(newtop);
-    memset(p, 0, newtop - p);
-    return p;
-  }
-
-  void append_intptr_t(intptr_t n) {
-    assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
-    intptr_t *p = (intptr_t*)_top;
-    char* newtop = _top + sizeof(intptr_t);
-    expand_top_to(newtop);
-    *p = n;
-  }
-
-  char* base()      const { return _base;        }
-  char* top()       const { return _top;         }
-  char* end()       const { return _end;         }
-  size_t reserved() const { return _end - _base; }
-  size_t used()     const { return _top - _base; }
-  bool is_packed()  const { return _is_packed;   }
-  bool is_allocatable() const {
-    return !is_packed() && _base != NULL;
-  }
-
-  void print(size_t total_bytes) const {
-    tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
-                  _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
-  }
-  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
-    tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
-               _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
-    if (strcmp(_name, failing_region) == 0) {
-      tty->print_cr(" required = %d", int(needed_bytes));
-    } else {
-      tty->cr();
-    }
-  }
-
-  void init(const ReservedSpace* rs) {
-    _base = _top = rs->base();
-    _end = rs->end();
-  }
-  void init(char* b, char* t, char* e) {
-    _base = b;
-    _top = t;
-    _end = e;
-  }
-
-  void pack(DumpRegion* next = NULL) {
-    assert(!is_packed(), "sanity");
-    _end = (char*)align_up(_top, Metaspace::reserve_alignment());
-    _is_packed = true;
-    if (next != NULL) {
-      next->_base = next->_top = this->_end;
-      next->_end = MetaspaceShared::shared_rs()->end();
-    }
-  }
-  bool contains(char* p) {
-    return base() <= p && p < top();
-  }
-};
-
-
-DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
-size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
+char* DumpRegion::expand_top_to(char* newtop) {
+  assert(is_allocatable(), "must be initialized and not packed");
+  assert(newtop >= _top, "must not grow backwards");
+  if (newtop > _end) {
+    MetaspaceShared::report_out_of_space(_name, newtop - _top);
+    ShouldNotReachHere();
+  }
+  uintx delta;
+  if (DynamicDumpSharedSpaces) {
+    delta = DynamicArchive::object_delta_uintx(newtop);
+  } else {
+    delta = MetaspaceShared::object_delta_uintx(newtop);
+  }
+  if (delta > MAX_SHARED_DELTA) {
+    // This is just a sanity check and should not appear in any real world usage. This
+    // happens only if you allocate more than 2GB of shared objects and would require
+    // millions of shared classes.
+    vm_exit_during_initialization("Out of memory in the CDS archive",
+                                  "Please reduce the number of shared classes.");
+  }
+
+  MetaspaceShared::commit_shared_space_to(newtop);
+  _top = newtop;
+  return _top;
+}
+
+char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
+  char* p = (char*)align_up(_top, alignment);
+  char* newtop = p + align_up(num_bytes, alignment);
+  expand_top_to(newtop);
+  memset(p, 0, newtop - p);
+  return p;
+}
+
+void DumpRegion::print(size_t total_bytes) const {
+  tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
+                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
+}
+
+void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
+  tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
+             _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
+  if (strcmp(_name, failing_region) == 0) {
+    tty->print_cr(" required = %d", int(needed_bytes));
+  } else {
+    tty->cr();
+  }
+}
+
+void DumpRegion::pack(DumpRegion* next) {
+  assert(!is_packed(), "sanity");
+  _end = (char*)align_up(_top, Metaspace::reserve_alignment());
+  _is_packed = true;
+  if (next != NULL) {
+    next->_base = next->_top = this->_end;
+    next->_end = MetaspaceShared::shared_rs()->end();
+  }
+}
+
+DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
+size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
+
+void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
+  // Start with 0 committed bytes. The memory will be committed as needed by
+  // MetaspaceShared::commit_shared_space_to().
+  if (!_shared_vs.initialize(_shared_rs, 0)) {
+    vm_exit_during_initialization("Unable to allocate memory for shared space");
+  }
+  first_space->init(&_shared_rs, (char*)first_space_bottom);
+}
+
+DumpRegion* MetaspaceShared::misc_code_dump_space() {
+  return &_mc_region;
+}
+
+DumpRegion* MetaspaceShared::read_write_dump_space() {
+  return &_rw_region;
+}
+
+DumpRegion* MetaspaceShared::read_only_dump_space() {
+  return &_ro_region;
+}
+
+DumpRegion* MetaspaceShared::optional_data_dump_space() {
+  return &_od_region;
+}
+
+void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
+                                      ReservedSpace* rs) {
+  current->pack(next);
+}
 
 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
   return _mc_region.allocate(num_bytes);
 }
 
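Note: DumpRegion's methods moved out of line because the class itself moved to the header, where dynamicArchive.cpp can now reuse it. At its core it is a bump-pointer allocator over the reserved archive space: allocate() aligns the current top, bumps it via expand_top_to(), and zero-fills the returned range. A minimal standalone sketch of that technique, with illustrative names rather than the HotSpot types:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Bump-pointer region: carve aligned, zero-filled chunks out of [_top, _end).
    struct BumpRegion {
      char* _top;
      char* _end;

      static uintptr_t align_up(uintptr_t n, uintptr_t a) {
        return (n + a - 1) & ~(a - 1);
      }

      char* allocate(size_t num_bytes, size_t alignment) {
        char* p = (char*)align_up((uintptr_t)_top, alignment);
        char* newtop = p + align_up(num_bytes, alignment);
        assert(newtop <= _end);   // DumpRegion reports out-of-space here instead
        _top = newtop;            // bump the high-water mark
        memset(p, 0, newtop - p); // fresh archive space starts zero-filled
        return p;
      }
    };

    int main() {
      alignas(8) static char buf[1024];
      BumpRegion r = { buf, buf + sizeof(buf) };
      char* p = r.allocate(100, 8);
      assert(p == buf && r._top == buf + 104); // 100 rounded up to 104
    }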
@@ -224,24 +219,27 @@
   assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
 
   // If using shared space, open the file that contains the shared space
   // and map in the memory before initializing the rest of metaspace (so
   // the addresses don't conflict)
-  address cds_address = NULL;
-  FileMapInfo* mapinfo = new FileMapInfo();
+  FileMapInfo* mapinfo = new FileMapInfo(true);
 
   // Open the shared archive file, read and validate the header. If
   // initialization fails, shared spaces [UseSharedSpaces] are
   // disabled and the file is closed.
   // Map in spaces now also
-  if (mapinfo->initialize() && map_shared_spaces(mapinfo)) {
+  if (mapinfo->initialize(true) && map_shared_spaces(mapinfo)) {
     size_t cds_total = core_spaces_size();
-    cds_address = (address)mapinfo->region_addr(0);
+    address cds_address = (address)mapinfo->region_addr(0);
+    char* cds_end = (char *)align_up(cds_address + cds_total,
+                                     Metaspace::reserve_alignment());
+
+    // Mapping the dynamic archive before allocating the class space
+    cds_end = initialize_dynamic_runtime_shared_spaces((char*)cds_address, cds_end);
+
 #ifdef _LP64
     if (Metaspace::using_class_space()) {
-      char* cds_end = (char*)(cds_address + cds_total);
-      cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment());
       // If UseCompressedClassPointers is set then allocate the metaspace area
       // above the heap and above the CDS area (if it exists).
       Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
       // map_heap_regions() compares the current narrow oop and klass encodings
       // with the archived ones, so it must be done after all encodings are determined.
@@ -251,10 +249,35 @@
 #endif // _LP64
   } else {
     assert(!mapinfo->is_open() && !UseSharedSpaces,
            "archive file not closed or shared spaces not disabled.");
   }
+}
+
+char* MetaspaceShared::initialize_dynamic_runtime_shared_spaces(
+        char* static_start, char* static_end) {
+  assert(UseSharedSpaces, "must be runtime");
+  char* cds_end = static_end;
+  if (!DynamicDumpSharedSpaces) {
+    address dynamic_top = DynamicArchive::map();
+    if (dynamic_top != NULL) {
+      assert(dynamic_top > (address)static_start, "Unexpected layout");
+      MetaspaceObj::expand_shared_metaspace_range(dynamic_top);
+      cds_end = (char *)align_up(dynamic_top, Metaspace::reserve_alignment());
+    }
+  }
+  return cds_end;
+}
+
+ReservedSpace* MetaspaceShared::reserve_shared_rs(size_t size, size_t alignment,
+                                                  bool large, char* requested_address) {
+  if (requested_address != NULL) {
+    _shared_rs = ReservedSpace(size, alignment, large, requested_address);
+  } else {
+    _shared_rs = ReservedSpace(size, alignment, large);
+  }
+  return &_shared_rs;
 }
 
 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
   assert(DumpSharedSpaces, "should be called for dump time only");
   const size_t reserve_alignment = Metaspace::reserve_alignment();
@@ -278,16 +301,18 @@
   // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
   size_t cds_total = align_down(256*M, reserve_alignment);
 #endif
 
   // First try to reserve the space at the specified SharedBaseAddress.
-  _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
+  //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
+  reserve_shared_rs(cds_total, reserve_alignment, large_pages, shared_base);
   if (_shared_rs.is_reserved()) {
     assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
   } else {
     // Get a mmap region anywhere if the SharedBaseAddress fails.
-    _shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
+    //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
+    reserve_shared_rs(cds_total, reserve_alignment, large_pages, NULL);
   }
   if (!_shared_rs.is_reserved()) {
     vm_exit_during_initialization("Unable to reserve memory for shared space",
                                   err_msg(SIZE_FORMAT " bytes.", cds_total));
   }
@@ -322,17 +347,11 @@
 
   log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                 CompressedClassSpaceSize, p2i(tmp_class_space.base()));
 #endif
 
-  // Start with 0 committed bytes. The memory will be committed as needed by
-  // MetaspaceShared::commit_shared_space_to().
-  if (!_shared_vs.initialize(_shared_rs, 0)) {
-    vm_exit_during_initialization("Unable to allocate memory for shared space");
-  }
-
-  _mc_region.init(&_shared_rs);
+  init_shared_dump_space(&_mc_region);
   SharedBaseAddress = (size_t)_shared_rs.base();
   tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                 _shared_rs.size(), p2i(_shared_rs.base()));
 }
 
@@ -340,13 +359,20 @@
 void MetaspaceShared::post_initialize(TRAPS) {
   if (UseSharedSpaces) {
     int size = FileMapInfo::get_number_of_shared_paths();
     if (size > 0) {
       SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
-      FileMapHeader* header = FileMapInfo::current_info()->header();
-      ClassLoaderExt::init_paths_start_index(header->_app_class_paths_start_index);
-      ClassLoaderExt::init_app_module_paths_start_index(header->_app_module_paths_start_index);
+      if (!DynamicDumpSharedSpaces) {
+        FileMapHeader* header;
+        if (FileMapInfo::dynamic_info() == NULL) {
+          header = FileMapInfo::current_info()->header();
+        } else {
+          header = FileMapInfo::dynamic_info()->header();
+        }
+        ClassLoaderExt::init_paths_start_index(header->_app_class_paths_start_index);
+        ClassLoaderExt::init_app_module_paths_start_index(header->_app_module_paths_start_index);
+      }
     }
   }
 }
 
 static GrowableArray<Handle>* _extra_interned_strings = NULL;
@@ -403,11 +429,11 @@
     }
   }
 }
 
 void MetaspaceShared::commit_shared_space_to(char* newtop) {
-  assert(DumpSharedSpaces, "dump-time only");
+  assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump-time only");
   char* base = _shared_rs.base();
   size_t need_committed_size = newtop - base;
   size_t has_committed_size = _shared_vs.committed_size();
   if (need_committed_size < has_committed_size) {
     return;
@@ -415,11 +441,12 @@
 
   size_t min_bytes = need_committed_size - has_committed_size;
   size_t preferred_bytes = 1 * M;
   size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;
 
   size_t commit = MAX2(min_bytes, preferred_bytes);
+  commit = MIN2(commit, uncommitted);
   assert(commit <= uncommitted, "sanity");
 
   bool result = _shared_vs.expand_by(commit, false);
   if (!result) {
     vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
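Note: the added MIN2 clamp is what keeps the growth policy honest near the end of the reservation: commit at least what is needed, preferably a 1 MB step, but never more than what is still uncommitted (otherwise the assert below would fire once a dynamic dump runs close to the reserved limit). The policy restated as a small self-contained check (illustrative code, not the VM's):

    #include <algorithm>
    #include <cassert>
    #include <cstdio>

    // Grow by at least min_bytes, preferably 1 MB, clamped to what remains.
    size_t commit_size(size_t min_bytes, size_t uncommitted) {
      const size_t preferred_bytes = 1 * 1024 * 1024;
      size_t commit = std::max(min_bytes, preferred_bytes);
      commit = std::min(commit, uncommitted); // the clamp this change adds
      assert(commit <= uncommitted);
      return commit;
    }

    int main() {
      printf("%zu\n", commit_size(4096, 512 * 1024)); // clamped to 524288
    }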
@@ -463,10 +490,13 @@
 
   JavaClasses::serialize_offsets(soc);
   InstanceMirrorKlass::serialize_offsets(soc);
   soc->do_tag(--tag);
 
+  serialize_cloned_cpp_vtptrs(soc);
+  soc->do_tag(--tag);
+
   soc->do_tag(666);
 }
 
 address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
   if (DumpSharedSpaces) {
@@ -480,10 +510,23 @@
     return NULL;
   }
 
   assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
   return _cds_i2i_entry_code_buffers;
+}
+
+uintx MetaspaceShared::object_delta_uintx(void* obj) {
+  assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
+         "supported only for dumping");
+  if (DumpSharedSpaces) {
+    assert(shared_rs()->contains(obj), "must be");
+  } else {
+    assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
+  }
+  address base_address = address(SharedBaseAddress);
+  uintx deltax = address(obj) - base_address;
+  return deltax;
 }
 
 // Global object for holding classes that have been loaded.  Since this
 // is run at a safepoint just before exit, this is the entire set of classes.
 static GrowableArray<Klass*>* _global_klass_objects;
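Note: object_delta_uintx spells out the archive's pointer encoding: a value is stored as an unsigned offset from SharedBaseAddress, which also works for a dynamic dump targeting space above the static archive, and the MAX_SHARED_DELTA check in DumpRegion::expand_top_to guards the same encoding. A standalone sketch of base-plus-delta encoding (the cap value below is an assumption taken from the "more than 2GB" comment earlier, not a constant from this file):

    #include <cassert>
    #include <cstdint>

    // Pointers are archived as offsets from a common base, so the image can be
    // mapped at a different address and relocated cheaply.
    const uintptr_t MAX_SHARED_DELTA = 0x7FFFFFFF; // assumed ~2 GB cap

    uintptr_t encode(const char* base, const void* obj) {
      uintptr_t delta = (uintptr_t)obj - (uintptr_t)base;
      assert(delta <= MAX_SHARED_DELTA);
      return delta;
    }

    const void* decode(const char* base, uintptr_t delta) {
      return base + delta;
    }

    int main() {
      char arena[64];
      uintptr_t d = encode(arena, &arena[16]);
      assert(decode(arena, d) == &arena[16]);
    }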
@@ -587,18 +630,22 @@
 static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
   for (int i = 0; i < _global_klass_objects->length(); i++) {
     Klass* k = _global_klass_objects->at(i);
     if (k->is_instance_klass()) {
       InstanceKlass* ik = InstanceKlass::cast(k);
-      for (int i = 0; i < ik->methods()->length(); i++) {
-        Method* m = ik->methods()->at(i);
-        rewrite_nofast_bytecode(m);
-        Fingerprinter fp(m);
-        // The side effect of this call sets method's fingerprint field.
-        fp.fingerprint();
-      }
-    }
+      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(ik);
+    }
+  }
+}
+
+void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(InstanceKlass* ik) {
+  for (int i = 0; i < ik->methods()->length(); i++) {
+    Method* m = ik->methods()->at(i);
+    rewrite_nofast_bytecode(m);
+    Fingerprinter fp(m);
+    // The side effect of this call sets method's fingerprint field.
+    fp.fingerprint();
   }
 }
 
 // Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
 // (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
@@ -684,11 +731,11 @@
   _info->set_vtable_size(n);
 
   intptr_t* p = clone_vtable(name, _info);
   assert((char*)p == _md_region.top(), "must be");
 
-  return p;
+  return _info->cloned_vtable();
 }
 
 template <class T>
 intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
   if (!DumpSharedSpaces) {
@@ -757,17 +804,96 @@
 
   return vtable_len;
 }
 
 #define ALLOC_CPP_VTABLE_CLONE(c) \
-  CppVtableCloner<c>::allocate(#c);
+  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c);
 
 #define CLONE_CPP_VTABLE(c) \
   p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);
 
 #define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();
+
+//------------------------------ for DynamicDumpSharedSpaces - start
+#define DECLARE_CLONED_VTABLE_KIND(c) c ## _Kind,
+
+enum {
+  CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
+  _num_cloned_vtable_kinds
+};
+
+static intptr_t** _cloned_cpp_vtptrs = NULL;
+
+void MetaspaceShared::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) {
+  soc->do_ptr((void**)&_cloned_cpp_vtptrs);
+}
+
+intptr_t* MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(MetaspaceObj::Type msotype, address obj) {
+  assert(DynamicDumpSharedSpaces, "must");
+  int kind = -1;
+  switch (msotype) {
+  case MetaspaceObj::SymbolType:
+  case MetaspaceObj::TypeArrayU1Type:
+  case MetaspaceObj::TypeArrayU2Type:
+  case MetaspaceObj::TypeArrayU4Type:
+  case MetaspaceObj::TypeArrayU8Type:
+  case MetaspaceObj::TypeArrayOtherType:
+  case MetaspaceObj::ConstMethodType:
+  case MetaspaceObj::ConstantPoolCacheType:
+  case MetaspaceObj::AnnotationsType:
+  case MetaspaceObj::MethodCountersType:
+    // These have no vtables.
+    break;
+  case MetaspaceObj::ClassType:
+    {
+      Klass* k = (Klass*)obj;
+      assert(k->is_klass(), "must be");
+      if (k->is_instance_klass()) {
+        kind = InstanceKlass_Kind;
+      } else {
+        assert(k->is_objArray_klass(),
+               "We shouldn't archive any other klasses in DynamicDumpSharedSpaces");
+        kind = ObjArrayKlass_Kind;
+      }
+    }
+    break;
+
+  case MetaspaceObj::MethodType:
+    {
+      Method* m = (Method*)obj;
+      assert(m->is_method(), "must be");
+      kind = Method_Kind;
+    }
+    break;
+
+  case MetaspaceObj::MethodDataType:
+    // We don't archive MethodData <-- should have been removed in removed_unsharable_info
+    ShouldNotReachHere();
+    break;
+
+  case MetaspaceObj::ConstantPoolType:
+    {
+      ConstantPool *cp = (ConstantPool*)obj;
+      assert(cp->is_constantPool(), "must be");
+      kind = ConstantPool_Kind;
+    }
+    break;
+
+  default:
+    ShouldNotReachHere();
+  }
+
+  if (kind >= 0) {
+    assert(kind < _num_cloned_vtable_kinds, "must be");
+    return _cloned_cpp_vtptrs[kind];
+  } else {
+    return NULL;
+  }
+}
+
+//------------------------------ for DynamicDumpSharedSpaces - end
 
 // This can be called at both dump time and run time.
 intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
   assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
   CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
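Note: _cloned_cpp_vtptrs is the piece that lets a dynamic dump patch vtables cheaply: every archived metaobject of a given C++ type shares one cloned vtable pointer, so fix_cpp_vtable_for_dynamic_archive only has to map a MetaspaceObj::Type to an index. A standalone sketch of such a kind-indexed table (illustrative kinds; it leans on the first-word _vptr layout the comment above describes for GCC):

    // One cloned vtable pointer per archived C++ type; an object written into
    // the dynamic archive gets its first word replaced by its kind's entry.
    enum Kind {
      InstanceKlass_Kind, ObjArrayKlass_Kind, Method_Kind, ConstantPool_Kind,
      _num_cloned_vtable_kinds
    };

    static void** cloned_vtptrs[_num_cloned_vtable_kinds]; // filled at dump time

    void patch_vptr(void* obj, Kind kind) {
      *(void***)obj = cloned_vtptrs[kind]; // overwrite the vptr word
    }

    int main() {
      static void* fake_vtable[4];
      cloned_vtptrs[Method_Kind] = fake_vtable;
      void* obj[2] = {};            // pretend the first word is a vptr
      patch_vptr(obj, Method_Kind);
      return obj[0] == (void*)fake_vtable ? 0 : 1;
    }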
@@ -828,59 +954,31 @@
 bool MetaspaceShared::is_valid_shared_method(const Method* m) {
   assert(is_in_shared_metaspace(m), "must be");
   return CppVtableCloner<Method>::is_valid_shared_object(m);
 }
 
-// Closure for serializing initialization data out to a data area to be
-// written to the shared file.
-
-class WriteClosure : public SerializeClosure {
-private:
-  DumpRegion* _dump_region;
-
-public:
-  WriteClosure(DumpRegion* r) {
-    _dump_region = r;
-  }
-
-  void do_ptr(void** p) {
-    _dump_region->append_intptr_t((intptr_t)*p);
-  }
-
-  void do_u4(u4* p) {
-    void* ptr = (void*)(uintx(*p));
-    do_ptr(&ptr);
-  }
-
-  void do_tag(int tag) {
-    _dump_region->append_intptr_t((intptr_t)tag);
-  }
-
-  void do_oop(oop* o) {
-    if (*o == NULL) {
-      _dump_region->append_intptr_t(0);
-    } else {
-      assert(HeapShared::is_heap_object_archiving_allowed(),
-             "Archiving heap object is not allowed");
-      _dump_region->append_intptr_t(
-        (intptr_t)CompressedOops::encode_not_null(*o));
-    }
-  }
-
-  void do_region(u_char* start, size_t size) {
-    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
-    assert(size % sizeof(intptr_t) == 0, "bad size");
-    do_tag((int)size);
-    while (size > 0) {
-      _dump_region->append_intptr_t(*(intptr_t*)start);
-      start += sizeof(intptr_t);
-      size -= sizeof(intptr_t);
-    }
-  }
-
-  bool reading() const { return false; }
-};
+void WriteClosure::do_oop(oop* o) {
+  if (*o == NULL) {
+    _dump_region->append_intptr_t(0);
+  } else {
+    assert(HeapShared::is_heap_object_archiving_allowed(),
+           "Archiving heap object is not allowed");
+    _dump_region->append_intptr_t(
+      (intptr_t)CompressedOops::encode_not_null(*o));
+  }
+}
+
+void WriteClosure::do_region(u_char* start, size_t size) {
+  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
+  assert(size % sizeof(intptr_t) == 0, "bad size");
+  do_tag((int)size);
+  while (size > 0) {
+    _dump_region->append_intptr_t(*(intptr_t*)start);
+    start += sizeof(intptr_t);
+    size -= sizeof(intptr_t);
+  }
+}
 
 // This is for dumping detailed statistics for the allocations
 // in the shared spaces.
 class DumpAllocStats : public ResourceObj {
 public:
@@ -1164,24 +1262,26 @@
   class ShallowCopier: public UniqueMetaspaceClosure {
     bool _read_only;
   public:
     ShallowCopier(bool read_only) : _read_only(read_only) {}
 
-    virtual void do_unique_ref(Ref* ref, bool read_only) {
+    virtual bool do_unique_ref(Ref* ref, bool read_only) {
       if (read_only == _read_only) {
         allocate(ref, read_only);
       }
+      return true; // recurse into ref.obj()
     }
   };
 
   // Relocate embedded pointers within a MetaspaceObj's shallow copy
   class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure {
   public:
-    virtual void do_unique_ref(Ref* ref, bool read_only) {
+    virtual bool do_unique_ref(Ref* ref, bool read_only) {
       address new_loc = get_new_loc(ref);
       RefRelocator refer;
       ref->metaspace_pointers_do_at(&refer, new_loc);
+      return true; // recurse into ref.obj()
     }
   };
 
   // Relocate a reference to point to its shallow copy
   class RefRelocator: public MetaspaceClosure {
@@ -1292,10 +1392,12 @@
     FileMapInfo::metaspace_pointers_do(it);
     SystemDictionaryShared::dumptime_classes_do(it);
     Universe::metaspace_pointers_do(it);
     SymbolTable::metaspace_pointers_do(it);
     vmSymbols::metaspace_pointers_do(it);
+
+    it->finish();
   }
 
   static Klass* get_relocated_klass(Klass* orig_klass) {
     assert(DumpSharedSpaces, "dump time only");
     address* pp = _new_loc_table->lookup((address)orig_klass);
@@ -1333,10 +1435,13 @@
   tty->print_cr("done. ");
 
   SystemDictionaryShared::write_to_archive();
 
   char* start = _ro_region.top();
+
+  size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*);
+  _cloned_cpp_vtptrs = (intptr_t**)_ro_region.allocate(vtptrs_bytes, sizeof(intptr_t*));
 
   // Write the other data to the output array.
   WriteClosure wc(&_ro_region);
   MetaspaceShared::serialize(&wc);
 
@@ -1352,10 +1457,11 @@
   // (1) Metaspace::allocate might trigger GC if we have run out of
   //     committed metaspace, but we can't GC because we're running
   //     in the VM thread.
   // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs.
   Metaspace::freeze();
+  DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
 
   Thread* THREAD = VMThread::vm_thread();
 
   FileMapInfo::check_nonempty_dir_in_shared_path_table();
 
@@ -1439,11 +1545,11 @@
   // We don't want to write these addresses into the archive.
   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
 
   // Create and write the archive file that maps the shared spaces.
 
-  FileMapInfo* mapinfo = new FileMapInfo();
+  FileMapInfo* mapinfo = new FileMapInfo(true);
   mapinfo->populate_header(os::vm_allocation_granularity());
   mapinfo->set_read_only_tables_start(read_only_tables_start);
   mapinfo->set_misc_data_patching_start(vtbl_list);
   mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers());
   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size());
@@ -1814,71 +1920,59 @@
     oopmaps->append(info);
   }
 }
 #endif // INCLUDE_CDS_JAVA_HEAP
 
-// Closure for serializing initialization data in from a data area
-// (ptr_array) read from the shared file.
-
-class ReadClosure : public SerializeClosure {
-private:
-  intptr_t** _ptr_array;
-
-  inline intptr_t nextPtr() {
-    return *(*_ptr_array)++;
-  }
-
-public:
-  ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }
-
-  void do_ptr(void** p) {
-    assert(*p == NULL, "initializing previous initialized pointer.");
-    intptr_t obj = nextPtr();
-    assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
-           "hit tag while initializing ptrs.");
-    *p = (void*)obj;
-  }
-
-  void do_u4(u4* p) {
-    intptr_t obj = nextPtr();
-    *p = (u4)(uintx(obj));
-  }
-
-  void do_tag(int tag) {
-    int old_tag;
-    old_tag = (int)(intptr_t)nextPtr();
-    // do_int(&old_tag);
-    assert(tag == old_tag, "old tag doesn't match");
-    FileMapInfo::assert_mark(tag == old_tag);
-  }
-
-  void do_oop(oop *p) {
-    narrowOop o = (narrowOop)nextPtr();
-    if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
-      p = NULL;
-    } else {
-      assert(HeapShared::is_heap_object_archiving_allowed(),
-             "Archived heap object is not allowed");
-      assert(HeapShared::open_archive_heap_region_mapped(),
-             "Open archive heap region is not mapped");
-      *p = HeapShared::decode_from_archive(o);
-    }
-  }
-
-  void do_region(u_char* start, size_t size) {
-    assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
-    assert(size % sizeof(intptr_t) == 0, "bad size");
-    do_tag((int)size);
-    while (size > 0) {
-      *(intptr_t*)start = nextPtr();
-      start += sizeof(intptr_t);
-      size -= sizeof(intptr_t);
-    }
-  }
-
-  bool reading() const { return true; }
-};
+void ReadClosure::do_ptr(void** p) {
+  assert(*p == NULL, "initializing previous initialized pointer.");
+  intptr_t obj = nextPtr();
+  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
+         "hit tag while initializing ptrs.");
+  *p = (void*)obj;
+}
+
+void ReadClosure::do_u4(u4* p) {
+  intptr_t obj = nextPtr();
+  *p = (u4)(uintx(obj));
+}
+
+void ReadClosure::do_tag(int tag) {
+  int old_tag;
+  old_tag = (int)(intptr_t)nextPtr();
+  // do_int(&old_tag);
+  assert(tag == old_tag, "old tag doesn't match");
+  FileMapInfo::assert_mark(tag == old_tag);
+}
+
+void ReadClosure::do_oop(oop *p) {
+  narrowOop o = (narrowOop)nextPtr();
+  if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
+    p = NULL;
+  } else {
+    assert(HeapShared::is_heap_object_archiving_allowed(),
+           "Archived heap object is not allowed");
+    assert(HeapShared::open_archive_heap_region_mapped(),
+           "Open archive heap region is not mapped");
+    *p = HeapShared::decode_from_archive(o);
+  }
+}
+
+void ReadClosure::do_region(u_char* start, size_t size) {
+  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
+  assert(size % sizeof(intptr_t) == 0, "bad size");
+  do_tag((int)size);
+  while (size > 0) {
+    *(intptr_t*)start = nextPtr();
+    start += sizeof(intptr_t);
+    size -= sizeof(intptr_t);
+  }
+}
+
+void MetaspaceShared::set_shared_metaspace_range(void* base, void* top) {
+  _shared_metaspace_static_top = top;
+  MetaspaceObj::set_shared_metaspace_range(base, top);
+}
 
 // Return true if given address is in the misc data region
 bool MetaspaceShared::is_in_shared_region(const void* p, int idx) {
   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx);
 }
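Note: the ReadClosure methods above mirror WriteClosure exactly: the writer emits raw intptr_t words interleaved with sentinel tags, and the reader consumes the same stream and re-checks every tag in order, so any drift between dump-time and run-time layout trips an assert rather than silently corrupting metadata. The round trip, sketched self-contained (not the HotSpot classes):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Writer: append words, interleaving sentinel tags.
    struct Writer {
      std::vector<intptr_t> buf;
      void do_ptr(intptr_t v) { buf.push_back(v); }
      void do_tag(int tag)    { buf.push_back(tag); }
    };

    // Reader: consume the same words, verifying each tag in order.
    struct Reader {
      const intptr_t* p;
      intptr_t next() { return *p++; }
      void do_tag(int tag) {
        int old_tag = (int)next();
        assert(tag == old_tag && "archive stream out of sync");
      }
    };

    int main() {
      Writer w;
      w.do_tag(666); w.do_ptr(42); w.do_tag(667);
      Reader r{w.buf.data()};
      r.do_tag(666); assert(r.next() == 42); r.do_tag(667);
    }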
@@ -1886,10 +1980,19 @@
 bool MetaspaceShared::is_in_trampoline_frame(address addr) {
   if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) {
     return true;
   }
   return false;
+}
+
+bool MetaspaceShared::is_shared_dynamic(void* p) {
+  if ((p < MetaspaceObj::shared_metaspace_top()) &&
+      (p >= _shared_metaspace_static_top)) {
+    return true;
+  } else {
+    return false;
+  }
 }
 
 // Map shared spaces at requested addresses and return if succeeded.
 bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
   size_t image_alignment = mapinfo->alignment();
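Note: is_shared_dynamic above works because initialize_dynamic_runtime_shared_spaces maps the dynamic archive directly on top of the static one and then widens the shared range: [base, _shared_metaspace_static_top) holds static metadata, and [_shared_metaspace_static_top, top) holds dynamic metadata. The test is just that interval check, sketched standalone:

    #include <cassert>

    // Shared range after mapping both archives:
    //   [base, static_top)  static archive metadata
    //   [static_top, top)   dynamic archive metadata
    struct SharedRange {
      const char* base;
      const char* static_top;
      const char* top;
      bool is_shared(const char* p)         const { return p >= base && p < top; }
      bool is_shared_dynamic(const char* p) const { return p >= static_top && p < top; }
    };

    int main() {
      char m[100];
      SharedRange r = { m, m + 60, m + 100 };
      assert(r.is_shared(m + 10) && !r.is_shared_dynamic(m + 10));
      assert(r.is_shared_dynamic(m + 70));
    }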
@@ -1902,46 +2005,27 @@
   if (!shared_rs.is_reserved()) return false;
 #endif
 
   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
 
-  char* ro_base = NULL; char* ro_top;
-  char* rw_base = NULL; char* rw_top;
-  char* mc_base = NULL; char* mc_top;
-  char* md_base = NULL; char* md_top;
-
   // Map each shared region
-  if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL &&
-      (rw_base = mapinfo->map_region(rw, &rw_top)) != NULL &&
-      (ro_base = mapinfo->map_region(ro, &ro_top)) != NULL &&
-      (md_base = mapinfo->map_region(md, &md_top)) != NULL &&
+  int regions[] = {mc, rw, ro, md};
+  size_t len = sizeof(regions)/sizeof(int);
+  char* saved_base[] = {NULL, NULL, NULL, NULL};
+  char* top = mapinfo->map_regions(regions, saved_base, len);
+
+  if (top != NULL &&
       (image_alignment == (size_t)os::vm_allocation_granularity()) &&
       mapinfo->validate_shared_path_table()) {
     // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
     // fast checking in MetaspaceShared::is_in_shared_metaspace() and
     // MetaspaceObj::is_shared().
-    //
-    // We require that mc->rw->ro->md to be laid out consecutively, with no
-    // gaps between them. That way, we can ensure that the OS won't be able to
-    // allocate any new memory spaces inside _shared_metaspace_{base,top}, which
-    // would mess up the simple comparision in MetaspaceShared::is_in_shared_metaspace().
-    assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base, "must be");
-    assert(md_top  > ro_top  && md_top  > rw_top  && md_top  > mc_top , "must be");
-    assert(mc_top == rw_base, "must be");
-    assert(rw_top == ro_base, "must be");
-    assert(ro_top == md_base, "must be");
-
     _core_spaces_size = mapinfo->core_spaces_size();
-    MetaspaceObj::set_shared_metaspace_range((void*)mc_base, (void*)md_top);
+    set_shared_metaspace_range((void*)saved_base[0], (void*)top);
     return true;
   } else {
-    // If there was a failure in mapping any of the spaces, unmap the ones
-    // that succeeded
-    if (ro_base != NULL) mapinfo->unmap_region(ro);
-    if (rw_base != NULL) mapinfo->unmap_region(rw);
-    if (mc_base != NULL) mapinfo->unmap_region(mc);
-    if (md_base != NULL) mapinfo->unmap_region(md);
+    mapinfo->unmap_regions(regions, saved_base, len);
 #ifndef _WINDOWS
     // Release the entire mapped region
     shared_rs.release();
 #endif
     // If -Xshare:on is specified, print out the error message and exit VM,
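Note: folding the four map_region calls into mapinfo->map_regions(regions, saved_base, len) puts the partial-failure cleanup in one place, since the same arrays drive unmap_regions on the else path. The pattern behind such an API, sketched with a stand-in mapper (hypothetical names, not the FileMapInfo signatures):

    #include <cstddef>

    // Map regions in order; on failure, unmap exactly the ones that succeeded.
    template <typename Mapper>
    char* map_regions(Mapper& mapper, const int* regions, char** saved_base, size_t len) {
      char* top = nullptr;
      for (size_t i = 0; i < len; i++) {
        saved_base[i] = mapper.map(regions[i], &top);
        if (saved_base[i] == nullptr) {
          for (size_t j = 0; j < i; j++) {
            mapper.unmap(regions[j], saved_base[j]); // roll back prior mappings
          }
          return nullptr;
        }
      }
      return top; // one past the last mapped region
    }

    struct FakeMapper { // stand-in for the real file mapping
      char storage[4][16];
      char* map(int region, char** top) { *top = storage[region] + 16; return storage[region]; }
      void unmap(int, char*) {}
    };

    int main() {
      FakeMapper m;
      int regions[] = {0, 1, 2, 3};
      char* saved_base[4] = {};
      return map_regions(m, regions, saved_base, 4) != nullptr ? 0 : 1;
    }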
@@ -1968,10 +2052,13 @@
   clone_cpp_vtables((intptr_t*)buffer);
 
   // The rest of the data is now stored in the RW region
   buffer = mapinfo->read_only_tables_start();
 
+  // Skip over _cloned_cpp_vtptrs;
+  buffer += _num_cloned_vtable_kinds * sizeof(intptr_t*);
+
   // Verify various attributes of the archive, plus initialize the
   // shared string/symbol tables
   intptr_t* array = (intptr_t*)buffer;
   ReadClosure rc(&array);
   serialize(&rc);
@@ -2006,10 +2093,16 @@
   if (UseSharedSpaces) {
     // remap the shared readonly space to shared readwrite, private
     FileMapInfo* mapinfo = FileMapInfo::current_info();
     if (!mapinfo->remap_shared_readonly_as_readwrite()) {
       return false;
+    }
+    if (FileMapInfo::dynamic_info() != NULL) {
+      mapinfo = FileMapInfo::dynamic_info();
+      if (!mapinfo->remap_shared_readonly_as_readwrite()) {
+        return false;
+      }
     }
     _remapped_readwrite = true;
   }
   return true;
 }