src/hotspot/share/memory/virtualspace.cpp
changeset 48153 cfa2c43e58c2
parent 47216 71c04702a3d5
child 49593 4dd58ecc9912
comparing 48152:bef902d8fef1 with 48153:cfa2c43e58c2
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -33,14 +33,14 @@

 // ReservedSpace

 // Dummy constructor
 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
-    _alignment(0), _special(false), _executable(false) {
+    _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
 }

-ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
+ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
   bool has_preferred_page_size = preferred_page_size != 0;
   // Want to use large pages where possible and pad with small pages.
   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
   bool large_pages = page_size != (size_t)os::vm_page_size();
   size_t alignment;
@@ -57,23 +57,34 @@
   initialize(size, alignment, large_pages, NULL, false);
 }

 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                              bool large,
-                             char* requested_address) {
+                             char* requested_address) : _fd_for_heap(-1) {
   initialize(size, alignment, large, requested_address, false);
 }

 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                              bool large,
-                             bool executable) {
+                             bool executable) : _fd_for_heap(-1) {
   initialize(size, alignment, large, NULL, executable);
+}
+
+// Helper method
+static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
+  if (is_file_mapped) {
+    if (!os::unmap_memory(base, size)) {
+      fatal("os::unmap_memory failed");
+    }
+  } else if (!os::release_memory(base, size)) {
+    fatal("os::release_memory failed");
+  }
 }

 // Helper method.
 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
-                                           const size_t size, bool special)
+                                           const size_t size, bool special, bool is_file_mapped = false)
 {
   if (base == requested_address || requested_address == NULL)
     return false; // did not fail

   if (base != NULL) {
@@ -85,13 +96,11 @@
     if (special) {
       if (!os::release_memory_special(base, size)) {
         fatal("os::release_memory_special failed");
       }
     } else {
-      if (!os::release_memory(base, size)) {
-        fatal("os::release_memory failed");
-      }
+      unmap_or_release_memory(base, size, is_file_mapped);
     }
   }
   return true;
 }

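The new unmap_or_release_memory() helper exists because the two kinds of memory are torn down differently: an anonymous reservation goes back through os::release_memory(), while a file mapping has to be unmapped (on Windows, roughly VirtualFree versus UnmapViewOfFile). Below is a minimal POSIX-level sketch of the two cases, assuming Linux-style mmap flags; none of it is HotSpot code and the temporary path is illustrative.

// Sketch only: the two mapping types the helper distinguishes.
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t sz = 4 * 1024 * 1024;

  // 1) Anonymous reservation, analogous to os::reserve_memory():
  //    no backing file, torn down via the "release" path.
  void* anon = mmap(NULL, sz, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (anon == MAP_FAILED) { perror("mmap anon"); return 1; }
  munmap(anon, sz);

  // 2) File-backed mapping, analogous to reserving the heap over
  //    _fd_for_heap: torn down via the "unmap" path, and the file
  //    itself outlives the mapping.
  int fd = open("/tmp/heap_backing_demo", O_RDWR | O_CREAT, 0600);
  if (fd == -1) { perror("open"); return 1; }
  if (ftruncate(fd, (off_t)sz) == -1) { perror("ftruncate"); return 1; }
  void* filemap = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (filemap == MAP_FAILED) { perror("mmap file"); return 1; }
  memset(filemap, 0, sz);     // touching the mapping writes to the file
  munmap(filemap, sz);
  close(fd);
  unlink("/tmp/heap_backing_demo");
  return 0;
}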
@@ -118,11 +127,22 @@
     return;
   }

   // If OS doesn't support demand paging for large page memory, we need
   // to use reserve_memory_special() to reserve and pin the entire region.
+  // If there is a backing file directory for this space then whether
+  // large pages are allocated is up to the filesystem of the backing file.
+  // So we ignore the UseLargePages flag in this case.
   bool special = large && !os::can_commit_large_page_memory();
+  if (special && _fd_for_heap != -1) {
+    special = false;
+    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+      !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+      log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
+    }
+  }
+
   char* base = NULL;

   if (special) {

     base = os::reserve_memory_special(size, alignment, requested_address, executable);
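The hunk above skips the reserve_memory_special() large-page path whenever _fd_for_heap is set, on the grounds that page size is then a property of the filesystem behind the backing file. A rough Linux-flavoured sketch of that distinction follows, assuming MAP_HUGETLB is available; the file name is illustrative only.

// Sketch only (Linux-flavoured): explicit large pages are requested on an
// anonymous mapping; a file-backed mapping inherits whatever page size its
// filesystem (e.g. tmpfs, hugetlbfs, a DAX mount) provides.
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t sz = 2 * 1024 * 1024;   // one 2M huge page, if configured

#ifdef MAP_HUGETLB
  // Anonymous + explicit huge pages: the analogue of reserve_memory_special().
  void* big = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  if (big == MAP_FAILED) {
    perror("mmap MAP_HUGETLB (needs configured huge pages)");
  } else {
    munmap(big, sz);
  }
#endif

  // File-backed: no large-page flag is passed; the page size is up to the
  // filesystem behind the fd, which is why UseLargePages is ignored here.
  int fd = open("/tmp/heap_lp_demo", O_RDWR | O_CREAT, 0600);
  if (fd == -1 || ftruncate(fd, (off_t)sz) == -1) { perror("setup"); return 1; }
  void* heap = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (heap == MAP_FAILED) { perror("mmap file"); return 1; }
  munmap(heap, sz);
  close(fd);
  unlink("/tmp/heap_lp_demo");
  return 0;
}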
@@ -155,31 +175,32 @@
     // If the memory was requested at a particular address, use
     // os::attempt_reserve_memory_at() to avoid over mapping something
     // important.  If available space is not detected, return NULL.

     if (requested_address != 0) {
-      base = os::attempt_reserve_memory_at(size, requested_address);
-      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
+      base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
+      if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
         // OS ignored requested address. Try different address.
         base = NULL;
       }
     } else {
-      base = os::reserve_memory(size, NULL, alignment);
+      base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
     }

     if (base == NULL) return;

     // Check alignment constraints
     if ((((size_t)base) & (alignment - 1)) != 0) {
       // Base not aligned, retry
-      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
+      unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
+
       // Make sure that size is aligned
       size = align_up(size, alignment);
-      base = os::reserve_memory_aligned(size, alignment);
+      base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);

       if (requested_address != 0 &&
-          failed_to_reserve_as_requested(base, requested_address, size, false)) {
+          failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
         // As a result of the alignment constraints, the allocated base differs
         // from the requested address. Return back to the caller who can
         // take remedial action (like try again without a requested address).
         assert(_base == NULL, "should be");
         return;
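With a backing file in play, os::attempt_reserve_memory_at() is handed _fd_for_heap, and failed_to_reserve_as_requested() has to know the mapping is file-backed so that a failed attempt is unmapped rather than released. A small POSIX sketch of the underlying pattern, mapping at a hint address without MAP_FIXED and detecting that the kernel placed it elsewhere; the hint value is arbitrary and assumes a 64-bit address space.

// Sketch only: "try at this address, undo and retry if the OS ignores it".
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void* map_at_hint(void* hint, size_t sz, int fd) {
  void* p = mmap(hint, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (p == MAP_FAILED) return NULL;
  if (hint != NULL && p != hint) {
    // Analogous to failed_to_reserve_as_requested(): the OS ignored the
    // requested address, so undo the mapping and let the caller retry.
    // For a file mapping the undo is munmap, not a plain release.
    munmap(p, sz);
    return NULL;
  }
  return p;
}

int main() {
  const size_t sz = 1024 * 1024;
  int fd = open("/tmp/heap_hint_demo", O_RDWR | O_CREAT, 0600);
  if (fd == -1 || ftruncate(fd, (off_t)sz) == -1) { perror("setup"); return 1; }

  void* hint = (void*)(uintptr_t)0x600000000000;   // arbitrary hint address
  void* p = map_at_hint(hint, sz, fd);
  printf("mapped at hint: %s\n", p != NULL ? "yes" : "no, caller retries");
  if (p != NULL) munmap(p, sz);
  close(fd);
  unlink("/tmp/heap_hint_demo");
  return 0;
}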
@@ -188,10 +209,14 @@
   }
   // Done
   _base = base;
   _size = size;
   _alignment = alignment;
+  // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
+  if (_fd_for_heap != -1) {
+    _special = true;
+  }
 }


 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                              bool special, bool executable) {
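Setting _special for a file-backed space records that the whole range is already committed: the mapping is created read/write over a file sized to the heap, so there is no later commit step to perform. A sketch of that difference under POSIX, contrasting reserve-then-commit for anonymous memory with an immediately usable file mapping; this is an illustration of the idea, not the VM's commit path.

// Sketch only: anonymous "reserve then commit" vs. file-backed "usable at once".
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t sz = 1024 * 1024;

  // Anonymous path: reserve with PROT_NONE, commit later by enabling access
  // (roughly what a separate commit step does for a non-special space).
  char* reserved = (char*)mmap(NULL, sz, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (reserved == MAP_FAILED) { perror("mmap"); return 1; }
  if (mprotect(reserved, sz, PROT_READ | PROT_WRITE) != 0) { perror("mprotect"); return 1; }
  reserved[0] = 1;                    // only now is it safe to touch
  munmap(reserved, sz);

  // File-backed path: once the file is sized and mapped read/write, the
  // whole range is usable; there is no separate commit step, which is why
  // the changeset marks such a space as _special.
  int fd = open("/tmp/heap_commit_demo", O_RDWR | O_CREAT, 0600);
  if (fd == -1 || ftruncate(fd, (off_t)sz) == -1) { perror("setup"); return 1; }
  char* heap = (char*)mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (heap == MAP_FAILED) { perror("mmap file"); return 1; }
  memset(heap, 0, sz);                // immediately writable
  munmap(heap, sz);
  close(fd);
  unlink("/tmp/heap_commit_demo");
  return 0;
}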
@@ -250,11 +275,15 @@
 void ReservedSpace::release() {
   if (is_reserved()) {
     char *real_base = _base - _noaccess_prefix;
     const size_t real_size = _size + _noaccess_prefix;
     if (special()) {
-      os::release_memory_special(real_base, real_size);
+      if (_fd_for_heap != -1) {
+        os::unmap_memory(real_base, real_size);
+      } else {
+        os::release_memory_special(real_base, real_size);
+      }
     } else{
       os::release_memory(real_base, real_size);
     }
     _base = NULL;
     _size = 0;
@@ -311,11 +340,21 @@
     release();
   }

   // If OS doesn't support demand paging for large page memory, we need
   // to use reserve_memory_special() to reserve and pin the entire region.
+  // If there is a backing file directory for this space then whether
+  // large pages are allocated is up to the filesystem of the backing file.
+  // So we ignore the UseLargePages flag in this case.
   bool special = large && !os::can_commit_large_page_memory();
+  if (special && _fd_for_heap != -1) {
+    special = false;
+    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+      log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
+    }
+  }
   char* base = NULL;

   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                              " heap of size " SIZE_FORMAT_HEX,
                              p2i(requested_address),
@@ -348,21 +387,26 @@
     // If the memory was requested at a particular address, use
     // os::attempt_reserve_memory_at() to avoid over mapping something
     // important.  If available space is not detected, return NULL.

     if (requested_address != 0) {
-      base = os::attempt_reserve_memory_at(size, requested_address);
+      base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
     } else {
-      base = os::reserve_memory(size, NULL, alignment);
+      base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
     }
   }
   if (base == NULL) { return; }

   // Done
   _base = base;
   _size = size;
   _alignment = alignment;
+
+  // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
+  if (_fd_for_heap != -1) {
+    _special = true;
+  }

   // Check alignment constraints
   if ((((size_t)base) & (alignment - 1)) != 0) {
     // Base not aligned, retry.
     release();
@@ -554,14 +598,22 @@
       initialize(size + noaccess_prefix, alignment, large, NULL, false);
     }
   }
 }

-ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
+ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {

   if (size == 0) {
     return;
+  }
+
+  if (heap_allocation_directory != NULL) {
+    _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
+    if (_fd_for_heap == -1) {
+      vm_exit_during_initialization(
+        err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
+    }
   }

   // Heap size should be aligned to alignment, too.
   guarantee(is_aligned(size, alignment), "set by caller");

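The constructor now asks os::create_file_for_heap() for a descriptor in heap_allocation_directory and exits the VM if that fails. The sketch below shows one common shape for such a helper (unique temporary file, unlinked immediately, sized with ftruncate); it is a hypothetical illustration, not the actual os::create_file_for_heap() implementation, and the create_backing_file name and file-name pattern are made up.

// Hypothetical sketch of a "create a backing file for the heap" helper.
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

// Returns an open descriptor for an anonymous, pre-sized file living in
// 'dir', or -1 on failure (mirroring the -1 check in the constructor).
static int create_backing_file(const char* dir, size_t heap_size) {
  char path[4096];
  snprintf(path, sizeof(path), "%s/jvmheap.XXXXXX", dir);  // illustrative name
  int fd = mkstemp(path);
  if (fd == -1) return -1;
  // Unlink immediately: the file stays alive while the fd and any mappings
  // exist, and disappears automatically when the process exits.
  unlink(path);
  if (ftruncate(fd, (off_t)heap_size) == -1) {
    close(fd);
    return -1;
  }
  return fd;
}

int main(int argc, char** argv) {
  const char* dir = argc > 1 ? argv[1] : "/tmp";
  int fd = create_backing_file(dir, 64 * 1024 * 1024);
  if (fd == -1) {
    fprintf(stderr, "Could not create file for heap at %s: %s\n", dir, strerror(errno));
    return 1;   // the VM would call vm_exit_during_initialization() here
  }
  printf("backing file fd = %d\n", fd);
  close(fd);
  return 0;
}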
@@ -582,10 +634,14 @@
   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
          "area must be distinguishable from marks for mark-sweep");

   if (base() != NULL) {
     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
+  }
+
+  if (_fd_for_heap != -1) {
+    os::close(_fd_for_heap);
   }
 }

 // Reserve space for code segment.  Same as Java heap only we mark this as
 // executable.
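Closing _fd_for_heap as soon as the heap has been reserved is safe because a mapping keeps its backing file alive independently of the descriptor; the directory itself presumably arrives via the AllocateHeapAt option referenced in the new log message. A short POSIX sketch of that property, with an illustrative file path:

// Sketch only: closing the descriptor after mmap does not invalidate the mapping.
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t sz = 1024 * 1024;
  int fd = open("/tmp/heap_fd_demo", O_RDWR | O_CREAT, 0600);
  if (fd == -1 || ftruncate(fd, (off_t)sz) == -1) { perror("setup"); return 1; }

  char* heap = (char*)mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (heap == MAP_FAILED) { perror("mmap"); return 1; }

  close(fd);                 // the mapping no longer needs the descriptor
  unlink("/tmp/heap_fd_demo");

  memset(heap, 0xAB, sz);    // still fully usable after close()
  printf("first byte after close: 0x%02x\n", (unsigned char)heap[0]);

  munmap(heap, sz);
  return 0;
}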