diff -r e0d59f0c2b7d -r 22ee476cc664 src/hotspot/share/memory/metaspaceShared.cpp
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Thu Nov 14 10:02:52 2019 +0800
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Wed Nov 13 16:36:54 2019 -0800
@@ -41,6 +41,8 @@
 #include "interpreter/bytecodes.hpp"
 #include "logging/log.hpp"
 #include "logging/logMessage.hpp"
+#include "memory/archiveUtils.inline.hpp"
+#include "memory/dynamicArchive.hpp"
 #include "memory/filemap.hpp"
 #include "memory/heapShared.inline.hpp"
 #include "memory/metaspace.hpp"
@@ -48,7 +50,6 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "memory/dynamicArchive.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
@@ -67,7 +68,7 @@
 #include "runtime/vmThread.hpp"
 #include "runtime/vmOperations.hpp"
 #include "utilities/align.hpp"
-#include "utilities/bitMap.hpp"
+#include "utilities/bitMap.inline.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/hashtable.inline.hpp"
 #if INCLUDE_G1GC
@@ -82,8 +83,8 @@
 bool MetaspaceShared::_remapped_readwrite = false;
 address MetaspaceShared::_i2i_entry_code_buffers = NULL;
 size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
-size_t MetaspaceShared::_core_spaces_size = 0;
 void* MetaspaceShared::_shared_metaspace_static_top = NULL;
+intx MetaspaceShared::_relocation_delta;
 
 // The CDS archive is divided into the following regions:
 //     mc  - misc code (the method entry trampolines)
@@ -147,9 +148,21 @@
   return p;
 }
 
+void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
+  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
+  intptr_t *p = (intptr_t*)_top;
+  char* newtop = _top + sizeof(intptr_t);
+  expand_top_to(newtop);
+  *p = n;
+  if (need_to_mark) {
+    ArchivePtrMarker::mark_pointer(p);
+  }
+}
+
 void DumpRegion::print(size_t total_bytes) const {
   tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
-                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
+                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
+                p2i(_base + MetaspaceShared::final_delta()));
 }
 
 void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
@@ -172,14 +185,14 @@
   }
 }
 
-DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
-size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
+static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
+static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
 
 void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
   // Start with 0 committed bytes. The memory will be committed as needed by
   // MetaspaceShared::commit_shared_space_to().
   if (!_shared_vs.initialize(_shared_rs, 0)) {
-    vm_exit_during_initialization("Unable to allocate memory for shared space");
+    fatal("Unable to allocate memory for shared space");
   }
   first_space->init(&_shared_rs, (char*)first_space_bottom);
 }
@@ -209,73 +222,32 @@
   return _ro_region.allocate(num_bytes);
 }
 
-void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
-  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
-
-  // If using shared space, open the file that contains the shared space
-  // and map in the memory before initializing the rest of metaspace (so
-  // the addresses don't conflict)
-  FileMapInfo* mapinfo = new FileMapInfo(true);
-
-  // Open the shared archive file, read and validate the header. If
-  // initialization fails, shared spaces [UseSharedSpaces] are
-  // disabled and the file is closed.
-  // Map in spaces now also
-  if (mapinfo->initialize(true) && map_shared_spaces(mapinfo)) {
-    size_t cds_total = core_spaces_size();
-    address cds_address = (address)mapinfo->region_addr(0);
-    char* cds_end = (char *)align_up(cds_address + cds_total,
-                                     Metaspace::reserve_alignment());
-
-    // Mapping the dynamic archive before allocating the class space
-    cds_end = initialize_dynamic_runtime_shared_spaces((char*)cds_address, cds_end);
-
-#ifdef _LP64
-    if (Metaspace::using_class_space()) {
-      // If UseCompressedClassPointers is set then allocate the metaspace area
-      // above the heap and above the CDS area (if it exists).
-      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
-      // map_heap_regions() compares the current narrow oop and klass encodings
-      // with the archived ones, so it must be done after all encodings are determined.
-      mapinfo->map_heap_regions();
-    }
-    CompressedKlassPointers::set_range(CompressedClassSpaceSize);
-#endif // _LP64
+// When reserving an address range using ReservedSpace, we need an alignment that satisfies both:
+// os::vm_allocation_granularity() -- so that we can sub-divide this range into multiple mmap regions,
+//                                    while keeping the first range at offset 0 of this range.
+// Metaspace::reserve_alignment() -- so we can pass the region to
+//                                   Metaspace::allocate_metaspace_compressed_klass_ptrs.
+size_t MetaspaceShared::reserved_space_alignment() {
+  size_t os_align = os::vm_allocation_granularity();
+  size_t ms_align = Metaspace::reserve_alignment();
+  if (os_align >= ms_align) {
+    assert(os_align % ms_align == 0, "must be a multiple");
+    return os_align;
   } else {
-    assert(!mapinfo->is_open() && !UseSharedSpaces,
-           "archive file not closed or shared spaces not disabled.");
+    assert(ms_align % os_align == 0, "must be a multiple");
+    return ms_align;
   }
 }
 
-char* MetaspaceShared::initialize_dynamic_runtime_shared_spaces(
-        char* static_start, char* static_end) {
-  assert(UseSharedSpaces, "must be runtime");
-  char* cds_end = static_end;
-  if (!DynamicDumpSharedSpaces) {
-    address dynamic_top = DynamicArchive::map();
-    if (dynamic_top != NULL) {
-      assert(dynamic_top > (address)static_start, "Unexpected layout");
-      MetaspaceObj::expand_shared_metaspace_range(dynamic_top);
-      cds_end = (char *)align_up(dynamic_top, Metaspace::reserve_alignment());
-    }
-  }
-  return cds_end;
-}
-
-ReservedSpace* MetaspaceShared::reserve_shared_rs(size_t size, size_t alignment,
-                                                  bool large, char* requested_address) {
-  if (requested_address != NULL) {
-    _shared_rs = ReservedSpace(size, alignment, large, requested_address);
-  } else {
-    _shared_rs = ReservedSpace(size, alignment, large);
-  }
-  return &_shared_rs;
+ReservedSpace MetaspaceShared::reserve_shared_space(size_t size, char* requested_address) {
+  bool large_pages = false; // Don't use large pages for the CDS archive.
+  assert(is_aligned(requested_address, reserved_space_alignment()), "must be");
+  return ReservedSpace(size, reserved_space_alignment(), large_pages, requested_address);
 }
 
 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
   assert(DumpSharedSpaces, "should be called for dump time only");
-  const size_t reserve_alignment = Metaspace::reserve_alignment();
-  bool large_pages = false; // No large pages when dumping the CDS archive.
+  const size_t reserve_alignment = reserved_space_alignment();
   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 
 #ifdef _LP64
@@ -296,15 +268,22 @@
   size_t cds_total = align_down(256*M, reserve_alignment);
 #endif
 
+  bool use_requested_base = true;
+  if (ArchiveRelocationMode == 1) {
+    log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
+    use_requested_base = false;
+  }
+
   // First try to reserve the space at the specified SharedBaseAddress.
-  //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
-  reserve_shared_rs(cds_total, reserve_alignment, large_pages, shared_base);
+  assert(!_shared_rs.is_reserved(), "must be");
+  if (use_requested_base) {
+    _shared_rs = reserve_shared_space(cds_total, shared_base);
+  }
   if (_shared_rs.is_reserved()) {
     assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
   } else {
     // Get a mmap region anywhere if the SharedBaseAddress fails.
-    //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
-    reserve_shared_rs(cds_total, reserve_alignment, large_pages, NULL);
+    _shared_rs = reserve_shared_space(cds_total);
   }
   if (!_shared_rs.is_reserved()) {
     vm_exit_during_initialization("Unable to reserve memory for shared space",
@@ -442,6 +421,8 @@
   assert(commit <= uncommitted, "sanity");
 
   bool result = _shared_vs.expand_by(commit, false);
+  ArchivePtrMarker::expand_ptr_end((address*)_shared_vs.high());
+
   if (!result) {
     vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                           need_committed_size));
@@ -451,6 +432,10 @@
                  commit, _shared_vs.actual_committed_size(), _shared_vs.high());
 }
 
+void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
+  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
+}
+
 // Read/write a data stream for restoring/preserving metadata pointers and
 // miscellaneous data from/to the shared archive file.
 
@@ -469,6 +454,7 @@
   soc->do_tag(sizeof(Symbol));
 
   // Dump/restore miscellaneous metadata.
+  JavaClasses::serialize_offsets(soc);
   Universe::serialize(soc);
   soc->do_tag(--tag);
 
@@ -482,7 +468,6 @@
 
   HeapShared::serialize_subgraph_info_table_header(soc);
   SystemDictionaryShared::serialize_dictionary_headers(soc);
-  JavaClasses::serialize_offsets(soc);
   InstanceMirrorKlass::serialize_offsets(soc);
   soc->do_tag(--tag);
 
@@ -705,7 +690,9 @@
   // Switch the vtable pointer to point to the cloned vtable.
   static void patch(Metadata* obj) {
     assert(DumpSharedSpaces, "dump-time only");
+    assert(MetaspaceShared::is_in_output_space(obj), "must be");
     *(void**)obj = (void*)(_info->cloned_vtable());
+    ArchivePtrMarker::mark_pointer(obj);
   }
 
   static bool is_valid_shared_object(const T* obj) {
@@ -799,7 +786,8 @@
 }
 
 #define ALLOC_CPP_VTABLE_CLONE(c) \
-  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c);
+  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
+  ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);
 
 #define CLONE_CPP_VTABLE(c) \
   p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);
@@ -965,7 +953,7 @@
   assert(size % sizeof(intptr_t) == 0, "bad size");
   do_tag((int)size);
   while (size > 0) {
-    _dump_region->append_intptr_t(*(intptr_t*)start);
+    _dump_region->append_intptr_t(*(intptr_t*)start, true);
     start += sizeof(intptr_t);
     size -= sizeof(intptr_t);
   }
@@ -1129,9 +1117,13 @@
                                  GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
   void dump_symbols();
   char* dump_read_only_tables();
+  void print_class_stats();
   void print_region_stats();
+  void print_bitmap_region_stats(size_t size, size_t total_size);
   void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
-                               const char *name, const size_t total_size);
+                               const char *name, size_t total_size);
+  void relocate_to_default_base_address(CHeapBitMap* ptrmap);
+
 public:
 
   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
@@ -1276,6 +1268,15 @@
     ref->metaspace_pointers_do_at(&refer, new_loc);
     return true; // recurse into ref.obj()
   }
+  virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
+    assert(type == _method_entry_ref, "only special type allowed for now");
+    address obj = ref->obj();
+    address new_obj = get_new_loc(ref);
+    size_t offset = pointer_delta(p, obj, sizeof(u1));
+    intptr_t* new_p = (intptr_t*)(new_obj + offset);
+    assert(*p == *new_p, "must be a copy");
+    ArchivePtrMarker::mark_pointer((address*)new_p);
+  }
 };
 
 // Relocate a reference to point to its shallow copy
@@ -1284,6 +1285,7 @@
   virtual bool do_ref(Ref* ref, bool read_only) {
     if (ref->not_null()) {
       ref->update(get_new_loc(ref));
+      ArchivePtrMarker::mark_pointer(ref->addr());
     }
     return false; // Do not recurse.
   }
@@ -1440,7 +1442,71 @@
   return start;
 }
 
+void VM_PopulateDumpSharedSpace::print_class_stats() {
+  tty->print_cr("Number of classes %d", _global_klass_objects->length());
+  {
+    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
+    for (int i = 0; i < _global_klass_objects->length(); i++) {
+      Klass* k = _global_klass_objects->at(i);
+      if (k->is_instance_klass()) {
+        num_inst ++;
+      } else if (k->is_objArray_klass()) {
+        num_obj_array ++;
+      } else {
+        assert(k->is_typeArray_klass(), "sanity");
+        num_type_array ++;
+      }
+    }
+    tty->print_cr("    instance classes   = %5d", num_inst);
+    tty->print_cr("    obj array classes  = %5d", num_obj_array);
+    tty->print_cr("    type array classes = %5d", num_type_array);
+  }
+}
+
+void VM_PopulateDumpSharedSpace::relocate_to_default_base_address(CHeapBitMap* ptrmap) {
+  intx addr_delta = MetaspaceShared::final_delta();
+  if (addr_delta == 0) {
+    ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_md_region.top());
+  } else {
+    // We are not able to reserve space at Arguments::default_SharedBaseAddress() (due to ASLR).
+    // This means that the current content of the archive is based on a random
+    // address. Let's relocate all the pointers, so that it can be mapped to
+    // Arguments::default_SharedBaseAddress() without runtime relocation.
+    //
+    // Note: both the base and dynamic archive are written with
+    // FileMapHeader::_shared_base_address == Arguments::default_SharedBaseAddress()
+
+    // Patch all pointers that are marked by ptrmap within this region,
+    // where we have just dumped all the metaspace data.
+    address patch_base = (address)SharedBaseAddress;
+    address patch_end  = (address)_md_region.top();
+    size_t size = patch_end - patch_base;
+
+    // the current value of the pointers to be patched must be within this
+    // range (i.e., must point to valid metaspace objects)
+    address valid_old_base = patch_base;
+    address valid_old_end  = patch_end;
+
+    // after patching, the pointers must point inside this range
+    // (the requested location of the archive, as mapped at runtime).
+    address valid_new_base = (address)Arguments::default_SharedBaseAddress();
+    address valid_new_end  = valid_new_base + size;
+
+    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
+                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
+                   p2i(valid_new_base), p2i(valid_new_end));
+
+    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
+                                      valid_new_base, valid_new_end, addr_delta, ptrmap);
+    ptrmap->iterate(&patcher);
+    ArchivePtrMarker::compact(patcher.max_non_null_offset());
+  }
+}
+
 void VM_PopulateDumpSharedSpace::doit() {
+  CHeapBitMap ptrmap;
+  MetaspaceShared::initialize_ptr_marker(&ptrmap);
+
   // We should no longer allocate anything from the metaspace, so that:
   //
   // (1) Metaspace::allocate might trigger GC if we have run out of
@@ -1472,24 +1538,7 @@
   CollectClassesClosure collect_classes;
   ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
 
-  tty->print_cr("Number of classes %d", _global_klass_objects->length());
-  {
-    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
-    for (int i = 0; i < _global_klass_objects->length(); i++) {
-      Klass* k = _global_klass_objects->at(i);
-      if (k->is_instance_klass()) {
-        num_inst ++;
-      } else if (k->is_objArray_klass()) {
-        num_obj_array ++;
-      } else {
-        assert(k->is_typeArray_klass(), "sanity");
-        num_type_array ++;
-      }
-    }
-    tty->print_cr("    instance classes   = %5d", num_inst);
-    tty->print_cr("    obj array classes  = %5d", num_obj_array);
-    tty->print_cr("    type array classes = %5d", num_type_array);
-  }
+  print_class_stats();
 
   // Ensure the ConstMethods won't be modified at run-time
   tty->print("Updating ConstMethods ... ");
@@ -1520,12 +1569,6 @@
   MetaspaceShared::allocate_cpp_vtable_clones();
   _md_region.pack();
 
-  // The 4 core spaces are allocated consecutively mc->rw->ro->md, so there total size
-  // is just the spaces between the two ends.
-  size_t core_spaces_size = _md_region.end() - _mc_region.base();
-  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
-         "should already be aligned");
-
   // During patching, some virtual methods may be called, so at this point
   // the vtables must contain valid methods (as filled in by CppVtableCloner<T>::allocate).
   MetaspaceShared::patch_cpp_vtable_pointers();
@@ -1534,6 +1577,10 @@
   // We don't want to write these addresses into the archive.
   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
 
+  // relocate the data so that it can be mapped to Arguments::default_SharedBaseAddress()
+  // without runtime relocation.
+  relocate_to_default_base_address(&ptrmap);
+
   // Create and write the archive file that maps the shared spaces.
   FileMapInfo* mapinfo = new FileMapInfo(true);
 
@@ -1542,7 +1589,6 @@
   mapinfo->set_misc_data_patching_start(vtbl_list);
   mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(),
                                       MetaspaceShared::i2i_entry_code_buffers_size());
-  mapinfo->set_core_spaces_size(core_spaces_size);
   mapinfo->open_for_write();
 
   // NOTE: md contains the trampoline code for method entries, which are patched at run time,
@@ -1552,6 +1598,8 @@
   write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
   write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
 
+  mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap());
+
   _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
                                         _closed_archive_heap_regions,
                                         _closed_archive_heap_oopmaps,
@@ -1563,6 +1611,7 @@
                                         MetaspaceShared::first_open_archive_heap_region,
                                         MetaspaceShared::max_open_archive_heap_region);
 
+  mapinfo->set_final_requested_base((char*)Arguments::default_SharedBaseAddress());
   mapinfo->set_header_crc(mapinfo->compute_header_crc());
   mapinfo->write_header();
   mapinfo->close();
@@ -1594,12 +1643,16 @@
 void VM_PopulateDumpSharedSpace::print_region_stats() {
   // Print statistics of all the regions
+  const size_t bitmap_used = ArchivePtrMarker::ptrmap()->size_in_bytes();
+  const size_t bitmap_reserved = align_up(bitmap_used, Metaspace::reserve_alignment());
   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
                                 _mc_region.reserved()  + _md_region.reserved() +
+                                bitmap_reserved +
                                 _total_closed_archive_region_size +
                                 _total_open_archive_region_size;
   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
                              _mc_region.used()  + _md_region.used() +
+                             bitmap_used +
                              _total_closed_archive_region_size +
                              _total_open_archive_region_size;
   const double total_u_perc = percent_of(total_bytes, total_reserved);
@@ -1608,6 +1661,7 @@
   _rw_region.print(total_reserved);
   _ro_region.print(total_reserved);
   _md_region.print(total_reserved);
+  print_bitmap_region_stats(bitmap_reserved, total_reserved);
   print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
   print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
 
@@ -1615,8 +1669,13 @@
                  total_bytes, total_reserved, total_u_perc);
 }
 
+void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
+  tty->print_cr("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
+                size, size/double(total_size)*100.0, size, p2i(NULL));
+}
+
 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
-                                                         const char *name, const size_t total_size) {
+                                                         const char *name, size_t total_size) {
   int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
   for (int i = 0; i < arr_len; i++) {
     char* start = (char*)heap_mem->at(i).start();
@@ -1636,9 +1695,13 @@
   o->set_klass(k);
 }
 
-Klass* MetaspaceShared::get_relocated_klass(Klass *k) {
+Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
   assert(DumpSharedSpaces, "sanity");
-  return ArchiveCompactor::get_relocated_klass(k);
+  k = ArchiveCompactor::get_relocated_klass(k);
+  if (is_final) {
+    k = (Klass*)(address(k) + final_delta());
+  }
+  return k;
 }
 
 class LinkSharedClassesClosure : public KlassClosure {
@@ -1947,8 +2010,9 @@
   }
 }
 
-void MetaspaceShared::set_shared_metaspace_range(void* base, void* top) {
-  _shared_metaspace_static_top = top;
+void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
+  assert(base <= static_top && static_top <= top, "must be");
+  _shared_metaspace_static_top = static_top;
   MetaspaceObj::set_shared_metaspace_range(base, top);
 }
 
@@ -1973,49 +2037,312 @@
   }
 }
 
-// Map shared spaces at requested addresses and return if succeeded.
-bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
-  size_t image_alignment = mapinfo->alignment();
+void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
+  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
+  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
+  FileMapInfo* static_mapinfo = open_static_archive();
+  FileMapInfo* dynamic_mapinfo = NULL;
+
+  if (static_mapinfo != NULL) {
+    dynamic_mapinfo = open_dynamic_archive();
+
+    // First try to map at the requested address
+    result = map_archives(static_mapinfo, dynamic_mapinfo, true);
+    if (result == MAP_ARCHIVE_MMAP_FAILURE) {
+      // Mapping has failed (probably due to ASLR). Let's map at an address chosen
+      // by the OS.
+      result = map_archives(static_mapinfo, dynamic_mapinfo, false);
+    }
+  }
+
+  if (result == MAP_ARCHIVE_SUCCESS) {
+    bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped());
+    char* cds_base = static_mapinfo->mapped_base();
+    char* cds_end = dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
+    set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
+    _relocation_delta = static_mapinfo->relocation_delta();
+    if (dynamic_mapped) {
+      FileMapInfo::set_shared_path_table(dynamic_mapinfo);
+    } else {
+      FileMapInfo::set_shared_path_table(static_mapinfo);
+    }
+  } else {
+    set_shared_metaspace_range(NULL, NULL, NULL);
+    UseSharedSpaces = false;
+    FileMapInfo::fail_continue("Unable to map shared spaces");
+    if (PrintSharedArchiveAndExit) {
+      vm_exit_during_initialization("Unable to use shared archive.");
+    }
+  }
+
+  if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
+    delete static_mapinfo;
+  }
+  if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
+    delete dynamic_mapinfo;
+  }
+}
+
+FileMapInfo* MetaspaceShared::open_static_archive() {
+  FileMapInfo* mapinfo = new FileMapInfo(true);
+  if (!mapinfo->initialize()) {
+    delete(mapinfo);
+    return NULL;
+  }
+  return mapinfo;
+}
+
+FileMapInfo* MetaspaceShared::open_dynamic_archive() {
+  if (DynamicDumpSharedSpaces) {
+    return NULL;
+  }
+  if (Arguments::GetSharedDynamicArchivePath() == NULL) {
+    return NULL;
+  }
 
-#ifndef _WINDOWS
-  // Map in the shared memory and then map the regions on top of it.
-  // On Windows, don't map the memory here because it will cause the
-  // mappings of the regions to fail.
-  ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
-  if (!shared_rs.is_reserved()) return false;
-#endif
+  FileMapInfo* mapinfo = new FileMapInfo(false);
+  if (!mapinfo->initialize()) {
+    delete(mapinfo);
+    return NULL;
+  }
+  return mapinfo;
+}
+
+// use_requested_addr:
+//   true  = map at FileMapHeader::_requested_base_address
+//   false = map at an alternative address picked by OS.
+MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
+                                               bool use_requested_addr) {
+  PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
+      // For product build only -- this is for benchmarking the cost of doing relocation.
+      // For debug builds, the check is done in FileMapInfo::map_regions for better test coverage.
+      log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
+      return MAP_ARCHIVE_MMAP_FAILURE;
+    });
+
+  if (ArchiveRelocationMode == 2 && !use_requested_addr) {
+    log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address");
+    return MAP_ARCHIVE_MMAP_FAILURE;
+  };
+
+  if (dynamic_mapinfo != NULL) {
+    // Ensure that the OS won't be able to allocate new memory spaces between the two
+    // archives, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
+    assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
+  }
 
-  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
+  ReservedSpace main_rs, archive_space_rs, class_space_rs;
+  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
+  char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
+                                                                 use_requested_addr, main_rs, archive_space_rs,
+                                                                 class_space_rs);
+  if (mapped_base_address == NULL) {
+    result = MAP_ARCHIVE_MMAP_FAILURE;
+  } else {
+    log_debug(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
+                   p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
+    log_debug(cds)("Reserved class_space_rs   [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
+                   p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
+    MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
+    MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
+      map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;
 
-  // Map each shared region
-  int regions[] = {mc, rw, ro, md};
-  size_t len = sizeof(regions)/sizeof(int);
-  char* saved_base[] = {NULL, NULL, NULL, NULL};
-  char* top = mapinfo->map_regions(regions, saved_base, len);
+    if (static_result == MAP_ARCHIVE_SUCCESS) {
+      if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
+        result = MAP_ARCHIVE_SUCCESS;
+      } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) {
+        assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed");
+        // No need to retry mapping the dynamic archive again, as it will never succeed
+        // (bad file, etc) -- just keep the base archive.
+        log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s",
+                                  dynamic_mapinfo->full_path());
+        result = MAP_ARCHIVE_SUCCESS;
+        // TODO, we can give the unused space for the dynamic archive to class_space_rs, but there's no
+        // easy API to do that right now.
+      } else {
+        result = MAP_ARCHIVE_MMAP_FAILURE;
+      }
+    } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) {
+      result = MAP_ARCHIVE_OTHER_FAILURE;
+    } else {
+      result = MAP_ARCHIVE_MMAP_FAILURE;
+    }
+  }
 
-  if (top != NULL &&
-      (image_alignment == (size_t)os::vm_allocation_granularity()) &&
-      mapinfo->validate_shared_path_table()) {
-    // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
-    //            fast checking in MetaspaceShared::is_in_shared_metaspace() and
-    //            MetaspaceObj::is_shared().
-    _core_spaces_size = mapinfo->core_spaces_size();
-    set_shared_metaspace_range((void*)saved_base[0], (void*)top);
-    return true;
+  if (result == MAP_ARCHIVE_SUCCESS) {
+    if (!main_rs.is_reserved() && class_space_rs.is_reserved()) {
+      MemTracker::record_virtual_memory_type((address)class_space_rs.base(), mtClass);
+    }
+    SharedBaseAddress = (size_t)mapped_base_address;
+    LP64_ONLY({
+        if (Metaspace::using_class_space()) {
+          assert(class_space_rs.is_reserved(), "must be");
+          char* cds_base = static_mapinfo->mapped_base();
+          Metaspace::allocate_metaspace_compressed_klass_ptrs(class_space_rs, NULL, (address)cds_base);
+          // map_heap_regions() compares the current narrow oop and klass encodings
+          // with the archived ones, so it must be done after all encodings are determined.
+          static_mapinfo->map_heap_regions();
+        }
+        CompressedKlassPointers::set_range(CompressedClassSpaceSize);
+      });
+  } else {
+    unmap_archive(static_mapinfo);
+    unmap_archive(dynamic_mapinfo);
+    release_reserved_spaces(main_rs, archive_space_rs, class_space_rs);
+  }
+
+  return result;
+}
+
+char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
+                                                          FileMapInfo* dynamic_mapinfo,
+                                                          bool use_requested_addr,
+                                                          ReservedSpace& main_rs,
+                                                          ReservedSpace& archive_space_rs,
+                                                          ReservedSpace& class_space_rs) {
+  const bool use_klass_space = NOT_LP64(false) LP64_ONLY(Metaspace::using_class_space());
+  const size_t class_space_size = NOT_LP64(0) LP64_ONLY(Metaspace::compressed_class_space_size());
+
+  if (use_klass_space) {
+    assert(class_space_size > 0, "CompressedClassSpaceSize must have been validated");
+  }
+  if (use_requested_addr && !is_aligned(static_mapinfo->requested_base_address(), reserved_space_alignment())) {
+    return NULL;
+  }
+
+  // Size and requested location of the archive_space_rs (for both static and dynamic archives)
+  size_t base_offset = static_mapinfo->mapping_base_offset();
+  size_t end_offset  = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
+  assert(base_offset == 0, "must be");
+  assert(is_aligned(end_offset,  os::vm_allocation_granularity()), "must be");
+  assert(is_aligned(base_offset, os::vm_allocation_granularity()), "must be");
+
+  // In case reserved_space_alignment() != os::vm_allocation_granularity()
+  assert((size_t)os::vm_allocation_granularity() <= reserved_space_alignment(), "must be");
+  end_offset = align_up(end_offset, reserved_space_alignment());
+
+  size_t archive_space_size = end_offset - base_offset;
+
+  // Special handling for Windows because it cannot mmap into a reserved space:
+  //     use_requested_addr: We just map each region individually, and give up if any one of them fails.
+  //    !use_requested_addr: We reserve the space first, and then os::read in all the regions (instead of mmap).
+  //                         We're going to patch all the pointers anyway so there's no benefit for mmap.
+
+  if (use_requested_addr) {
+    char* archive_space_base = static_mapinfo->requested_base_address() + base_offset;
+    char* archive_space_end  = archive_space_base + archive_space_size;
+    if (!MetaspaceShared::use_windows_memory_mapping()) {
+      archive_space_rs = reserve_shared_space(archive_space_size, archive_space_base);
+      if (!archive_space_rs.is_reserved()) {
+        return NULL;
+      }
+    }
+    if (use_klass_space) {
+      // Make sure we can map the klass space immediately following the archive_space space
+      char* class_space_base = archive_space_end;
+      class_space_rs = reserve_shared_space(class_space_size, class_space_base);
+      if (!class_space_rs.is_reserved()) {
+        return NULL;
+      }
+    }
+    return static_mapinfo->requested_base_address();
   } else {
-    mapinfo->unmap_regions(regions, saved_base, len);
-#ifndef _WINDOWS
-    // Release the entire mapped region
-    shared_rs.release();
-#endif
-    // If -Xshare:on is specified, print out the error message and exit VM,
-    // otherwise, set UseSharedSpaces to false and continue.
-    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
-      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
+    if (use_klass_space) {
+      main_rs = reserve_shared_space(archive_space_size + class_space_size);
+      if (main_rs.is_reserved()) {
+        archive_space_rs = main_rs.first_part(archive_space_size, reserved_space_alignment(), /*split=*/true);
+        class_space_rs = main_rs.last_part(archive_space_size);
+      }
+    } else {
+      main_rs = reserve_shared_space(archive_space_size);
+      archive_space_rs = main_rs;
+    }
+    if (archive_space_rs.is_reserved()) {
+      return archive_space_rs.base();
     } else {
-      FLAG_SET_DEFAULT(UseSharedSpaces, false);
+      return NULL;
+    }
+  }
+}
+
+void MetaspaceShared::release_reserved_spaces(ReservedSpace& main_rs,
+                                              ReservedSpace& archive_space_rs,
+                                              ReservedSpace& class_space_rs) {
+  if (main_rs.is_reserved()) {
+    assert(main_rs.contains(archive_space_rs.base()), "must be");
+    assert(main_rs.contains(class_space_rs.base()), "must be");
+    log_debug(cds)("Released shared space (archive+classes) " INTPTR_FORMAT, p2i(main_rs.base()));
+    main_rs.release();
+  } else {
+    if (archive_space_rs.is_reserved()) {
+      log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
+      archive_space_rs.release();
+    }
+    if (class_space_rs.is_reserved()) {
+      log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
+      class_space_rs.release();
     }
-    return false;
+  }
+}
+
+static int static_regions[]  = {MetaspaceShared::mc,
+                                MetaspaceShared::rw,
+                                MetaspaceShared::ro,
+                                MetaspaceShared::md};
+static int dynamic_regions[] = {MetaspaceShared::rw,
+                                MetaspaceShared::ro,
+                                MetaspaceShared::mc};
+static int static_regions_count  = 4;
+static int dynamic_regions_count = 3;
+
+MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) {
+  assert(UseSharedSpaces, "must be runtime");
+  if (mapinfo == NULL) {
+    return MAP_ARCHIVE_SUCCESS; // no error has happened -- trivially succeeded.
+  }
+
+  mapinfo->set_is_mapped(false);
+
+  if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) {
+    log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT
+                   " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity());
+    return MAP_ARCHIVE_OTHER_FAILURE;
+  }
+
+  MapArchiveResult result = mapinfo->is_static() ?
+    mapinfo->map_regions(static_regions, static_regions_count, mapped_base_address, rs) :
+    mapinfo->map_regions(dynamic_regions, dynamic_regions_count, mapped_base_address, rs);
+
+  if (result != MAP_ARCHIVE_SUCCESS) {
+    unmap_archive(mapinfo);
+    return result;
+  }
+
+  if (mapinfo->is_static()) {
+    if (!mapinfo->validate_shared_path_table()) {
+      unmap_archive(mapinfo);
+      return MAP_ARCHIVE_OTHER_FAILURE;
+    }
+  } else {
+    if (!DynamicArchive::validate(mapinfo)) {
+      unmap_archive(mapinfo);
+      return MAP_ARCHIVE_OTHER_FAILURE;
+    }
+  }
+
+  mapinfo->set_is_mapped(true);
+  return MAP_ARCHIVE_SUCCESS;
+}
+
+void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) {
+  assert(UseSharedSpaces, "must be runtime");
+  if (mapinfo != NULL) {
+    if (mapinfo->is_static()) {
+      mapinfo->unmap_regions(static_regions, static_regions_count);
+    } else {
+      mapinfo->unmap_regions(dynamic_regions, dynamic_regions_count);
    }
+    mapinfo->set_is_mapped(false);
   }
 }
@@ -2023,17 +2350,15 @@
 // serialize it out to its various destinations.
 
 void MetaspaceShared::initialize_shared_spaces() {
-  FileMapInfo *mapinfo = FileMapInfo::current_info();
-  _i2i_entry_code_buffers = mapinfo->i2i_entry_code_buffers();
-  _i2i_entry_code_buffers_size = mapinfo->i2i_entry_code_buffers_size();
-  // _core_spaces_size is loaded from the shared archive immediatelly after mapping
-  assert(_core_spaces_size == mapinfo->core_spaces_size(), "sanity");
-  char* buffer = mapinfo->misc_data_patching_start();
+  FileMapInfo *static_mapinfo = FileMapInfo::current_info();
+  _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers();
+  _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size();
+  char* buffer = static_mapinfo->misc_data_patching_start();
   clone_cpp_vtables((intptr_t*)buffer);
 
   // Verify various attributes of the archive, plus initialize the
   // shared string/symbol tables
-  buffer = mapinfo->serialized_data_start();
+  buffer = static_mapinfo->serialized_data_start();
   intptr_t* array = (intptr_t*)buffer;
   ReadClosure rc(&array);
   serialize(&rc);
@@ -2041,17 +2366,26 @@
   // Initialize the run-time symbol table.
   SymbolTable::create_table();
 
-  mapinfo->patch_archived_heap_embedded_pointers();
+  static_mapinfo->patch_archived_heap_embedded_pointers();
 
   // Close the mapinfo file
-  mapinfo->close();
+  static_mapinfo->close();
+
+  FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
+  if (dynamic_mapinfo != NULL) {
+    intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data_start();
+    ReadClosure rc(&buffer);
+    SymbolTable::serialize_shared_table_header(&rc, false);
+    SystemDictionaryShared::serialize_dictionary_headers(&rc, false);
+    dynamic_mapinfo->close();
+  }
 
   if (PrintSharedArchiveAndExit) {
     if (PrintSharedDictionary) {
       tty->print_cr("\nShared classes:\n");
       SystemDictionaryShared::print_on(tty);
     }
-    if (_archive_loading_failed) {
+    if (FileMapInfo::current_info() == NULL || _archive_loading_failed) {
       tty->print_cr("archive is invalid");
       vm_exit(1);
     } else {
@@ -2094,3 +2428,10 @@
   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                 "Please reduce the number of shared classes.");
 }
+
+// This is used to relocate the pointers so that the archive can be mapped at
+// Arguments::default_SharedBaseAddress() without runtime relocation.
+intx MetaspaceShared::final_delta() {
+  return intx(Arguments::default_SharedBaseAddress())  // We want the archive to be mapped to here at runtime
+       - intx(SharedBaseAddress);                      // .. but the archive is mapped at here at dump time
+}
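
Summary of the mechanism this patch builds: at dump time, every pointer-sized slot written into the archive that holds a pointer is marked in a bitmap (ArchivePtrMarker); the bitmap is written out as its own "bm" region; and whenever the archive cannot be mapped at FileMapHeader::_requested_base_address, each marked slot is patched by a single delta (SharedDataRelocator, MetaspaceShared::final_delta()). The stand-alone sketch below illustrates only that bitmap-and-delta idea; every name in it is invented for the example and none of it is HotSpot code.

// Toy illustration of bitmap-marked pointer relocation -- NOT part of the patch.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

using Slot = std::intptr_t;

struct ArchiveSketch {
  std::vector<Slot> slots;   // the "archive": pointer-sized slots
  std::vector<bool> ptrmap;  // bit i set => slots[i] holds a pointer

  // Dump-time side: store a value, marking it if it is a pointer
  // (the ArchivePtrMarker::mark_pointer() analogue).
  void append(Slot value, bool is_pointer) {
    slots.push_back(value);
    ptrmap.push_back(is_pointer);
  }

  // Runtime side: add 'delta' to every marked, non-null slot; each patched
  // value must land inside the range the archive is actually mapped at.
  void relocate(Slot delta, Slot valid_new_base, Slot valid_new_end) {
    for (std::size_t i = 0; i < slots.size(); i++) {
      if (ptrmap[i] && slots[i] != 0) {
        slots[i] += delta;
        assert(slots[i] >= valid_new_base && slots[i] < valid_new_end);
      }
    }
  }
};

int main() {
  const Slot dump_base      = 0x10000000;  // base address used while dumping
  const Slot requested_base = 0x20000000;  // base the archive ends up mapped at

  ArchiveSketch archive;
  archive.append(42, /*is_pointer=*/false);                // plain data: never patched
  archive.append(dump_base + 0x100, /*is_pointer=*/true);  // intra-archive "pointer"

  const Slot delta = requested_base - dump_base;           // final_delta() analogue
  archive.relocate(delta, requested_base, requested_base + 0x1000);

  std::printf("data slot = %ld, pointer slot = 0x%lx\n",
              (long)archive.slots[0], (unsigned long)archive.slots[1]);
  return 0;
}

In the patch itself, the diagnostic flag ArchiveRelocationMode exercises both paths: 1 always maps the archive(s) at an OS-chosen address (forcing the patching path), while 2 never maps at an alternative address.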