hotspot/src/share/vm/runtime/virtualspace.cpp
changeset 30291 54cdc5c1a9cb
parent 30288 476c276de939
child 30292 2ae3e5b862e1
     1 /*
       
     2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
       
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
       
     4  *
       
     5  * This code is free software; you can redistribute it and/or modify it
       
     6  * under the terms of the GNU General Public License version 2 only, as
       
     7  * published by the Free Software Foundation.
       
     8  *
       
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
       
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
       
    12  * version 2 for more details (a copy is included in the LICENSE file that
       
    13  * accompanied this code).
       
    14  *
       
    15  * You should have received a copy of the GNU General Public License version
       
    16  * 2 along with this work; if not, write to the Free Software Foundation,
       
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
       
    18  *
       
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
       
    20  * or visit www.oracle.com if you need additional information or have any
       
    21  * questions.
       
    22  *
       
    23  */
       
    24 
       
    25 #include "precompiled.hpp"
       
    26 #include "oops/markOop.hpp"
       
    27 #include "oops/oop.inline.hpp"
       
    28 #include "runtime/virtualspace.hpp"
       
    29 #include "services/memTracker.hpp"
       
    30 
       
    31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
       
    32 
       
    33 // ReservedSpace
       
    34 
       
    35 // Dummy constructor
       
    36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
       
    37     _alignment(0), _special(false), _executable(false) {
       
    38 }
       
    39 
       
    40 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
       
    41   bool has_preferred_page_size = preferred_page_size != 0;
       
    42   // Want to use large pages where possible and pad with small pages.
       
    43   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
       
    44   bool large_pages = page_size != (size_t)os::vm_page_size();
       
    45   size_t alignment;
       
    46   if (large_pages && has_preferred_page_size) {
       
    47     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
       
    48     // ReservedSpace initialization requires size to be aligned to the given
       
    49     // alignment. Align the size up.
       
    50     size = align_size_up(size, alignment);
       
    51   } else {
       
    52     // Don't force the alignment to be large page aligned,
       
    53     // since that will waste memory.
       
    54     alignment = os::vm_allocation_granularity();
       
    55   }
       
    56   initialize(size, alignment, large_pages, NULL, false);
       
    57 }
       
    58 
       
    59 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
       
    60                              bool large,
       
    61                              char* requested_address) {
       
    62   initialize(size, alignment, large, requested_address, false);
       
    63 }
       
    64 
       
    65 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
       
    66                              bool large,
       
    67                              bool executable) {
       
    68   initialize(size, alignment, large, NULL, executable);
       
    69 }
       
    70 
       
    71 // Helper method.
       
    72 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
       
    73                                            const size_t size, bool special)
       
    74 {
       
    75   if (base == requested_address || requested_address == NULL)
       
    76     return false; // did not fail
       
    77 
       
    78   if (base != NULL) {
       
     79     // A different reserve address may be acceptable in other cases,
     80     // but for compressed oops the heap should be at the requested address.
       
    81     assert(UseCompressedOops, "currently requested address used only for compressed oops");
       
    82     if (PrintCompressedOopsMode) {
       
    83       tty->cr();
       
    84       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
       
    85     }
       
    86     // OS ignored requested address. Try different address.
       
    87     if (special) {
       
    88       if (!os::release_memory_special(base, size)) {
       
    89         fatal("os::release_memory_special failed");
       
    90       }
       
    91     } else {
       
    92       if (!os::release_memory(base, size)) {
       
    93         fatal("os::release_memory failed");
       
    94       }
       
    95     }
       
    96   }
       
    97   return true;
       
    98 }
       
    99 
       
   100 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
       
   101                                char* requested_address,
       
   102                                bool executable) {
       
   103   const size_t granularity = os::vm_allocation_granularity();
       
   104   assert((size & (granularity - 1)) == 0,
       
   105          "size not aligned to os::vm_allocation_granularity()");
       
   106   assert((alignment & (granularity - 1)) == 0,
       
   107          "alignment not aligned to os::vm_allocation_granularity()");
       
   108   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
       
   109          "not a power of 2");
       
   110 
       
   111   alignment = MAX2(alignment, (size_t)os::vm_page_size());
       
   112 
       
   113   _base = NULL;
       
   114   _size = 0;
       
   115   _special = false;
       
   116   _executable = executable;
       
   117   _alignment = 0;
       
   118   _noaccess_prefix = 0;
       
   119   if (size == 0) {
       
   120     return;
       
   121   }
       
   122 
       
    123   // If the OS doesn't support demand paging for large page memory, we need
       
   124   // to use reserve_memory_special() to reserve and pin the entire region.
       
   125   bool special = large && !os::can_commit_large_page_memory();
       
   126   char* base = NULL;
       
   127 
       
   128   if (special) {
       
   129 
       
   130     base = os::reserve_memory_special(size, alignment, requested_address, executable);
       
   131 
       
   132     if (base != NULL) {
       
   133       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
       
   134         // OS ignored requested address. Try different address.
       
   135         return;
       
   136       }
       
   137       // Check alignment constraints.
       
   138       assert((uintptr_t) base % alignment == 0,
       
   139              err_msg("Large pages returned a non-aligned address, base: "
       
   140                  PTR_FORMAT " alignment: " PTR_FORMAT,
       
   141                  base, (void*)(uintptr_t)alignment));
       
   142       _special = true;
       
   143     } else {
       
   144       // failed; try to reserve regular memory below
       
   145       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
       
   146                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
       
   147         if (PrintCompressedOopsMode) {
       
   148           tty->cr();
       
   149           tty->print_cr("Reserve regular memory without large pages.");
       
   150         }
       
   151       }
       
   152     }
       
   153   }
       
   154 
       
   155   if (base == NULL) {
       
    156     // Optimistically assume that the OS returns an aligned base pointer.
       
   157     // When reserving a large address range, most OSes seem to align to at
       
   158     // least 64K.
       
   159 
       
   160     // If the memory was requested at a particular address, use
       
   161     // os::attempt_reserve_memory_at() to avoid over mapping something
       
   162     // important.  If available space is not detected, return NULL.
       
   163 
       
   164     if (requested_address != 0) {
       
   165       base = os::attempt_reserve_memory_at(size, requested_address);
       
   166       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
       
   167         // OS ignored requested address. Try different address.
       
   168         base = NULL;
       
   169       }
       
   170     } else {
       
   171       base = os::reserve_memory(size, NULL, alignment);
       
   172     }
       
   173 
       
   174     if (base == NULL) return;
       
   175 
       
   176     // Check alignment constraints
       
   177     if ((((size_t)base) & (alignment - 1)) != 0) {
       
   178       // Base not aligned, retry
       
   179       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
       
   180       // Make sure that size is aligned
       
   181       size = align_size_up(size, alignment);
       
   182       base = os::reserve_memory_aligned(size, alignment);
       
   183 
       
   184       if (requested_address != 0 &&
       
   185           failed_to_reserve_as_requested(base, requested_address, size, false)) {
       
   186         // As a result of the alignment constraints, the allocated base differs
       
    187         // from the requested address. Return to the caller, who can
       
   188         // take remedial action (like try again without a requested address).
       
   189         assert(_base == NULL, "should be");
       
   190         return;
       
   191       }
       
   192     }
       
   193   }
       
   194   // Done
       
   195   _base = base;
       
   196   _size = size;
       
   197   _alignment = alignment;
       
   198 }
       
   199 
       
   200 
       
   201 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
       
   202                              bool special, bool executable) {
       
   203   assert((size % os::vm_allocation_granularity()) == 0,
       
   204          "size not allocation aligned");
       
   205   _base = base;
       
   206   _size = size;
       
   207   _alignment = alignment;
       
   208   _noaccess_prefix = 0;
       
   209   _special = special;
       
   210   _executable = executable;
       
   211 }
       
   212 
       
   213 
       
   214 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
       
   215                                         bool split, bool realloc) {
       
   216   assert(partition_size <= size(), "partition failed");
       
   217   if (split) {
       
   218     os::split_reserved_memory(base(), size(), partition_size, realloc);
       
   219   }
       
   220   ReservedSpace result(base(), partition_size, alignment, special(),
       
   221                        executable());
       
   222   return result;
       
   223 }
       
   224 
       
   225 
       
   226 ReservedSpace
       
   227 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
       
   228   assert(partition_size <= size(), "partition failed");
       
   229   ReservedSpace result(base() + partition_size, size() - partition_size,
       
   230                        alignment, special(), executable());
       
   231   return result;
       
   232 }
       
   233 
       
   234 
       
   235 size_t ReservedSpace::page_align_size_up(size_t size) {
       
   236   return align_size_up(size, os::vm_page_size());
       
   237 }
       
   238 
       
   239 
       
   240 size_t ReservedSpace::page_align_size_down(size_t size) {
       
   241   return align_size_down(size, os::vm_page_size());
       
   242 }
       
   243 
       
   244 
       
   245 size_t ReservedSpace::allocation_align_size_up(size_t size) {
       
   246   return align_size_up(size, os::vm_allocation_granularity());
       
   247 }
       
   248 
       
   249 
       
   250 size_t ReservedSpace::allocation_align_size_down(size_t size) {
       
   251   return align_size_down(size, os::vm_allocation_granularity());
       
   252 }
       
   253 
       
   254 
       
   255 void ReservedSpace::release() {
       
   256   if (is_reserved()) {
       
   257     char *real_base = _base - _noaccess_prefix;
       
   258     const size_t real_size = _size + _noaccess_prefix;
       
   259     if (special()) {
       
   260       os::release_memory_special(real_base, real_size);
       
    261     } else {
       
   262       os::release_memory(real_base, real_size);
       
   263     }
       
   264     _base = NULL;
       
   265     _size = 0;
       
   266     _noaccess_prefix = 0;
       
   267     _alignment = 0;
       
   268     _special = false;
       
   269     _executable = false;
       
   270   }
       
   271 }
       
   272 
       
   273 static size_t noaccess_prefix_size(size_t alignment) {
       
   274   return lcm(os::vm_page_size(), alignment);
       
   275 }
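// Editor's note (illustrative arithmetic, not part of the original source):
// with a 4K os::vm_page_size() and an 8M heap alignment, lcm(4K, 8M) == 8M,
// so the no-access prefix in front of the heap occupies exactly one alignment unit.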
       
   276 
       
   277 void ReservedHeapSpace::establish_noaccess_prefix() {
       
   278   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
       
   279   _noaccess_prefix = noaccess_prefix_size(_alignment);
       
   280 
       
   281   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
       
   282     if (true
       
   283         WIN64_ONLY(&& !UseLargePages)
       
   284         AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
       
   285       // Protect memory at the base of the allocated region.
       
    286       // If special, the page was committed (only matters on Windows)
       
   287       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
       
   288         fatal("cannot protect protection page");
       
   289       }
       
   290       if (PrintCompressedOopsMode) {
       
   291         tty->cr();
       
   292         tty->print_cr("Protected page at the reserved heap base: "
       
   293                       PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
       
   294       }
       
   295       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
       
   296     } else {
       
   297       Universe::set_narrow_oop_use_implicit_null_checks(false);
       
   298     }
       
   299   }
       
   300 
       
   301   _base += _noaccess_prefix;
       
   302   _size -= _noaccess_prefix;
       
   303   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
       
   304 }
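// Editor's note (hedged sketch, not in the original source): the compressed-oop
// base is typically placed at the start of the protected prefix
// (_base - _noaccess_prefix), so decoding a null narrow oop,
//   oop = narrow_base + ((uintptr_t)narrow_oop << shift);   // narrow_oop == 0
// yields an address inside the protected page. The resulting fault is what
// allows implicit null checks for compressed oops instead of explicit tests.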
       
   305 
       
   306 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
       
   307 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
       
   308 // might still fulfill the wishes of the caller.
       
    309 // Ensures the memory is aligned to 'alignment'.
    310 // NOTE: If ReservedHeapSpace already points to some reserved memory, that memory is freed first.
       
   311 void ReservedHeapSpace::try_reserve_heap(size_t size,
       
   312                                          size_t alignment,
       
   313                                          bool large,
       
   314                                          char* requested_address) {
       
   315   if (_base != NULL) {
       
   316     // We tried before, but we didn't like the address delivered.
       
   317     release();
       
   318   }
       
   319 
       
    320   // If the OS doesn't support demand paging for large page memory, we need
       
   321   // to use reserve_memory_special() to reserve and pin the entire region.
       
   322   bool special = large && !os::can_commit_large_page_memory();
       
   323   char* base = NULL;
       
   324 
       
   325   if (PrintCompressedOopsMode && Verbose) {
       
   326     tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " PTR_FORMAT ".\n",
       
   327                requested_address, (address)size);
       
   328   }
       
   329 
       
   330   if (special) {
       
   331     base = os::reserve_memory_special(size, alignment, requested_address, false);
       
   332 
       
   333     if (base != NULL) {
       
   334       // Check alignment constraints.
       
   335       assert((uintptr_t) base % alignment == 0,
       
   336              err_msg("Large pages returned a non-aligned address, base: "
       
   337                      PTR_FORMAT " alignment: " PTR_FORMAT,
       
   338                      base, (void*)(uintptr_t)alignment));
       
   339       _special = true;
       
   340     }
       
   341   }
       
   342 
       
   343   if (base == NULL) {
       
   344     // Failed; try to reserve regular memory below
       
   345     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
       
   346                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
       
   347       if (PrintCompressedOopsMode) {
       
   348         tty->cr();
       
   349         tty->print_cr("Reserve regular memory without large pages.");
       
   350       }
       
   351     }
       
   352 
       
    353     // Optimistically assume that the OS returns an aligned base pointer.
       
   354     // When reserving a large address range, most OSes seem to align to at
       
   355     // least 64K.
       
   356 
       
   357     // If the memory was requested at a particular address, use
       
   358     // os::attempt_reserve_memory_at() to avoid over mapping something
       
   359     // important.  If available space is not detected, return NULL.
       
   360 
       
   361     if (requested_address != 0) {
       
   362       base = os::attempt_reserve_memory_at(size, requested_address);
       
   363     } else {
       
   364       base = os::reserve_memory(size, NULL, alignment);
       
   365     }
       
   366   }
       
   367   if (base == NULL) { return; }
       
   368 
       
   369   // Done
       
   370   _base = base;
       
   371   _size = size;
       
   372   _alignment = alignment;
       
   373 
       
   374   // Check alignment constraints
       
   375   if ((((size_t)base) & (alignment - 1)) != 0) {
       
   376     // Base not aligned, retry.
       
   377     release();
       
   378   }
       
   379 }
       
   380 
       
   381 void ReservedHeapSpace::try_reserve_range(char *highest_start,
       
   382                                           char *lowest_start,
       
   383                                           size_t attach_point_alignment,
       
   384                                           char *aligned_heap_base_min_address,
       
   385                                           char *upper_bound,
       
   386                                           size_t size,
       
   387                                           size_t alignment,
       
   388                                           bool large) {
       
   389   const size_t attach_range = highest_start - lowest_start;
       
    390   // Cap num_attempts at the number of possible attach points.
    391   // At least one attempt is possible even for a zero-sized attach range.
       
   392   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
       
   393   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
       
   394 
       
   395   const size_t stepsize = (attach_range == 0) ? // Only one try.
       
   396     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
       
   397 
       
   398   // Try attach points from top to bottom.
       
   399   char* attach_point = highest_start;
       
   400   while (attach_point >= lowest_start  &&
       
   401          attach_point <= highest_start &&  // Avoid wrap around.
       
   402          ((_base == NULL) ||
       
   403           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
       
   404     try_reserve_heap(size, alignment, large, attach_point);
       
   405     attach_point -= stepsize;
       
   406   }
       
   407 }
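// Editor's note (worked example under assumed values, not in the original
// source): with HeapSearchSteps == 3, an attach range of 1G and a 256M
// attach_point_alignment, num_attempts_to_try is 3 and
// stepsize = align_size_up(1G / 3, 256M) = 512M, so the loop above probes at
// most highest_start, highest_start - 512M and highest_start - 1G
// (== lowest_start), stopping early once a try yields an acceptable _base.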
       
   408 
       
   409 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
       
   410 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
       
   411 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
       
   412 
       
   413 // Helper for heap allocation. Returns an array with addresses
       
   414 // (OS-specific) which are suited for disjoint base mode. Array is
       
   415 // NULL terminated.
       
   416 static char** get_attach_addresses_for_disjoint_mode() {
       
   417   static uint64_t addresses[] = {
       
   418      2 * SIZE_32G,
       
   419      3 * SIZE_32G,
       
   420      4 * SIZE_32G,
       
   421      8 * SIZE_32G,
       
   422     10 * SIZE_32G,
       
   423      1 * SIZE_64K * SIZE_32G,
       
   424      2 * SIZE_64K * SIZE_32G,
       
   425      3 * SIZE_64K * SIZE_32G,
       
   426      4 * SIZE_64K * SIZE_32G,
       
   427     16 * SIZE_64K * SIZE_32G,
       
   428     32 * SIZE_64K * SIZE_32G,
       
   429     34 * SIZE_64K * SIZE_32G,
       
   430     0
       
   431   };
       
   432 
       
    433   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This assumes
    434   // the array is sorted in ascending order.
       
   435   uint i = 0;
       
   436   while (addresses[i] != 0 &&
       
   437          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
       
   438     i++;
       
   439   }
       
   440   uint start = i;
       
   441 
       
   442   // Avoid more steps than requested.
       
   443   i = 0;
       
   444   while (addresses[start+i] != 0) {
       
   445     if (i == HeapSearchSteps) {
       
   446       addresses[start+i] = 0;
       
   447       break;
       
   448     }
       
   449     i++;
       
   450   }
       
   451 
       
   452   return (char**) &addresses[start];
       
   453 }
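// Editor's note (background, hedged): with the default 8-byte object alignment
// OopEncodingHeapMax is 32G, so the attach points above are multiples of 32G.
// A heap based there has zeros in the low 35 bits of its base, which do not
// overlap the bits of a shifted narrow oop; "disjoint base" decoding exploits
// exactly this non-overlap.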
       
   454 
       
   455 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
       
   456   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
       
    457             "cannot allocate compressed oop heap for this size");
       
   458   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
       
   459   assert(HeapBaseMinAddress > 0, "sanity");
       
   460 
       
   461   const size_t granularity = os::vm_allocation_granularity();
       
   462   assert((size & (granularity - 1)) == 0,
       
   463          "size not aligned to os::vm_allocation_granularity()");
       
   464   assert((alignment & (granularity - 1)) == 0,
       
   465          "alignment not aligned to os::vm_allocation_granularity()");
       
   466   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
       
   467          "not a power of 2");
       
   468 
       
   469   // The necessary attach point alignment for generated wish addresses.
       
   470   // This is needed to increase the chance of attaching for mmap and shmat.
       
   471   const size_t os_attach_point_alignment =
       
   472     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
       
   473     NOT_AIX(os::vm_allocation_granularity());
       
   474   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
       
   475 
       
   476   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
       
   477   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
       
   478     noaccess_prefix_size(alignment) : 0;
       
   479 
       
    480   // Attempt to allocate at the user-given address.
       
   481   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
       
   482     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
       
   483     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
       
   484       release();
       
   485     }
       
   486   }
       
   487 
       
   488   // Keep heap at HeapBaseMinAddress.
       
   489   if (_base == NULL) {
       
   490 
       
   491     // Try to allocate the heap at addresses that allow efficient oop compression.
       
   492     // Different schemes are tried, in order of decreasing optimization potential.
       
   493     //
       
   494     // For this, try_reserve_heap() is called with the desired heap base addresses.
       
   495     // A call into the os layer to allocate at a given address can return memory
       
   496     // at a different address than requested.  Still, this might be memory at a useful
       
    497     // address. try_reserve_heap() always keeps the allocated memory (in _base), since
    498     // the criteria for a good heap are only checked here, in the caller.
       
   499 
       
    500     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
       
   501     // Give it several tries from top of range to bottom.
       
   502     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
       
   503 
       
    504       // Calculate the address range within which we try to attach (range of possible start addresses).
       
   505       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
       
   506       char* const lowest_start  = (char *)align_ptr_up  (        aligned_heap_base_min_address             , attach_point_alignment);
       
   507       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
       
   508                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
       
   509     }
       
   510 
       
   511     // zerobased: Attempt to allocate in the lower 32G.
       
    512     // But leave room for the compressed class space, which is allocated above
       
   513     // the heap.
       
   514     char *zerobased_max = (char *)OopEncodingHeapMax;
       
   515     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
       
   516     // For small heaps, save some space for compressed class pointer
       
   517     // space so it can be decoded with no base.
       
   518     if (UseCompressedClassPointers && !UseSharedSpaces &&
       
   519         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
       
   520         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
       
   521       zerobased_max = (char *)OopEncodingHeapMax - class_space;
       
   522     }
       
   523 
       
   524     // Give it several tries from top of range to bottom.
       
    525     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
       
   526         ((_base == NULL) ||                        // No previous try succeeded.
       
   527          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
       
   528 
       
    529       // Calculate the address range within which we try to attach (range of possible start addresses).
       
   530       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
       
   531       // Need to be careful about size being guaranteed to be less
       
   532       // than UnscaledOopHeapMax due to type constraints.
       
   533       char *lowest_start = aligned_heap_base_min_address;
       
   534       uint64_t unscaled_end = UnscaledOopHeapMax - size;
       
   535       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
       
   536         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
       
   537       }
       
   538       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
       
   539       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
       
   540                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
       
   541     }
       
   542 
       
   543     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
       
   544     // implement null checks.
       
   545     noaccess_prefix = noaccess_prefix_size(alignment);
       
   546 
       
    547     // Try to attach at addresses that are aligned to OopEncodingHeapMax (disjoint base mode).
       
   548     char** addresses = get_attach_addresses_for_disjoint_mode();
       
   549     int i = 0;
       
   550     while (addresses[i] &&                                 // End of array not yet reached.
       
   551            ((_base == NULL) ||                             // No previous try succeeded.
       
   552             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
       
   553              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
       
   554       char* const attach_point = addresses[i];
       
   555       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
       
   556       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
       
   557       i++;
       
   558     }
       
   559 
       
   560     // Last, desperate try without any placement.
       
   561     if (_base == NULL) {
       
   562       if (PrintCompressedOopsMode && Verbose) {
       
   563         tty->print("Trying to allocate at address NULL heap of size " PTR_FORMAT ".\n", (address)size + noaccess_prefix);
       
   564       }
       
   565       initialize(size + noaccess_prefix, alignment, large, NULL, false);
       
   566     }
       
   567   }
       
   568 }
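// Editor's note (summary added for clarity, hedged): the attempts above run in
// order of decreasing benefit: (1) the explicitly set HeapBaseMinAddress,
// (2) below UnscaledOopHeapMax (4G) for unscaled oops, (3) below zerobased_max
// (~32G with default object alignment) for zero-based oops, (4) at the
// disjoint-base attach points, and (5) anywhere. A noaccess prefix is reserved
// for attempts that may place the heap above OopEncodingHeapMax and is
// established later in the constructor (when _size > size).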
       
   569 
       
   570 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
       
   571 
       
   572   if (size == 0) {
       
   573     return;
       
   574   }
       
   575 
       
   576   // Heap size should be aligned to alignment, too.
       
   577   guarantee(is_size_aligned(size, alignment), "set by caller");
       
   578 
       
   579   if (UseCompressedOops) {
       
   580     initialize_compressed_heap(size, alignment, large);
       
   581     if (_size > size) {
       
    582       // We allocated the heap with a noaccess prefix.
    583       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
    584       // if we had to try at an arbitrary address.
       
   585       establish_noaccess_prefix();
       
   586     }
       
   587   } else {
       
   588     initialize(size, alignment, large, NULL, false);
       
   589   }
       
   590 
       
   591   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
       
   592          "area must be distinguishable from marks for mark-sweep");
       
   593   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
       
   594          "area must be distinguishable from marks for mark-sweep");
       
   595 
       
    596   if (base() != NULL) {
       
   597     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
       
   598   }
       
   599 }
       
   600 
       
    601 // Reserve space for the code segment.  Same as the Java heap, only we mark this as
    602 // executable.
       
   603 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
       
   604                                      size_t rs_align,
       
   605                                      bool large) :
       
   606   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
       
   607   MemTracker::record_virtual_memory_type((address)base(), mtCode);
       
   608 }
       
   609 
       
   610 // VirtualSpace
       
   611 
       
   612 VirtualSpace::VirtualSpace() {
       
   613   _low_boundary           = NULL;
       
   614   _high_boundary          = NULL;
       
   615   _low                    = NULL;
       
   616   _high                   = NULL;
       
   617   _lower_high             = NULL;
       
   618   _middle_high            = NULL;
       
   619   _upper_high             = NULL;
       
   620   _lower_high_boundary    = NULL;
       
   621   _middle_high_boundary   = NULL;
       
   622   _upper_high_boundary    = NULL;
       
   623   _lower_alignment        = 0;
       
   624   _middle_alignment       = 0;
       
   625   _upper_alignment        = 0;
       
   626   _special                = false;
       
   627   _executable             = false;
       
   628 }
       
   629 
       
   630 
       
   631 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
       
   632   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
       
   633   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
       
   634 }
       
   635 
       
   636 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
       
    637   if (!rs.is_reserved()) return false;  // allocation failed.
       
   638   assert(_low_boundary == NULL, "VirtualSpace already initialized");
       
   639   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
       
   640 
       
   641   _low_boundary  = rs.base();
       
   642   _high_boundary = low_boundary() + rs.size();
       
   643 
       
   644   _low = low_boundary();
       
   645   _high = low();
       
   646 
       
   647   _special = rs.special();
       
   648   _executable = rs.executable();
       
   649 
       
   650   // When a VirtualSpace begins life at a large size, make all future expansion
       
   651   // and shrinking occur aligned to a granularity of large pages.  This avoids
       
   652   // fragmentation of physical addresses that inhibits the use of large pages
       
   653   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
       
   654   // page size, the only spaces that get handled this way are codecache and
       
   655   // the heap itself, both of which provide a substantial performance
       
   656   // boost in many benchmarks when covered by large pages.
       
   657   //
       
   658   // No attempt is made to force large page alignment at the very top and
       
    659   // bottom of the space if they are not already so aligned.
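  // Editor's note (illustrative layout, not part of the original source):
  //
  //   low_boundary()   lower_high_boundary()    middle_high_boundary()   high_boundary()
  //        |---- lower ----|--------- middle ---------|---- upper ----|
  //        small pages      max_commit_granularity     small pages
  //
  // Only the middle region is committed with max_commit_granularity; the
  // unaligned head and tail fall back to os::vm_page_size().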
       
   660   _lower_alignment  = os::vm_page_size();
       
   661   _middle_alignment = max_commit_granularity;
       
   662   _upper_alignment  = os::vm_page_size();
       
   663 
       
   664   // End of each region
       
   665   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
       
   666   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
       
   667   _upper_high_boundary = high_boundary();
       
   668 
       
   669   // High address of each region
       
   670   _lower_high = low_boundary();
       
   671   _middle_high = lower_high_boundary();
       
   672   _upper_high = middle_high_boundary();
       
   673 
       
   674   // commit to initial size
       
   675   if (committed_size > 0) {
       
   676     if (!expand_by(committed_size)) {
       
   677       return false;
       
   678     }
       
   679   }
       
   680   return true;
       
   681 }
       
   682 
       
   683 
       
   684 VirtualSpace::~VirtualSpace() {
       
   685   release();
       
   686 }
       
   687 
       
   688 
       
   689 void VirtualSpace::release() {
       
    690   // This does not release the underlying memory, which this VirtualSpace never reserved.
    691   // The caller must release it via rs.release();
       
   692   _low_boundary           = NULL;
       
   693   _high_boundary          = NULL;
       
   694   _low                    = NULL;
       
   695   _high                   = NULL;
       
   696   _lower_high             = NULL;
       
   697   _middle_high            = NULL;
       
   698   _upper_high             = NULL;
       
   699   _lower_high_boundary    = NULL;
       
   700   _middle_high_boundary   = NULL;
       
   701   _upper_high_boundary    = NULL;
       
   702   _lower_alignment        = 0;
       
   703   _middle_alignment       = 0;
       
   704   _upper_alignment        = 0;
       
   705   _special                = false;
       
   706   _executable             = false;
       
   707 }
       
   708 
       
   709 
       
   710 size_t VirtualSpace::committed_size() const {
       
   711   return pointer_delta(high(), low(), sizeof(char));
       
   712 }
       
   713 
       
   714 
       
   715 size_t VirtualSpace::reserved_size() const {
       
   716   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
       
   717 }
       
   718 
       
   719 
       
   720 size_t VirtualSpace::uncommitted_size()  const {
       
   721   return reserved_size() - committed_size();
       
   722 }
       
   723 
       
   724 size_t VirtualSpace::actual_committed_size() const {
       
   725   // Special VirtualSpaces commit all reserved space up front.
       
   726   if (special()) {
       
   727     return reserved_size();
       
   728   }
       
   729 
       
   730   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
       
   731   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
       
   732   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
       
   733 
       
   734 #ifdef ASSERT
       
   735   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
       
   736   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
       
   737   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
       
   738 
       
   739   if (committed_high > 0) {
       
   740     assert(committed_low == lower, "Must be");
       
   741     assert(committed_middle == middle, "Must be");
       
   742   }
       
   743 
       
   744   if (committed_middle > 0) {
       
   745     assert(committed_low == lower, "Must be");
       
   746   }
       
   747   if (committed_middle < middle) {
       
   748     assert(committed_high == 0, "Must be");
       
   749   }
       
   750 
       
   751   if (committed_low < lower) {
       
   752     assert(committed_high == 0, "Must be");
       
   753     assert(committed_middle == 0, "Must be");
       
   754   }
       
   755 #endif
       
   756 
       
   757   return committed_low + committed_middle + committed_high;
       
   758 }
       
   759 
       
   760 
       
   761 bool VirtualSpace::contains(const void* p) const {
       
   762   return low() <= (const char*) p && (const char*) p < high();
       
   763 }
       
   764 
       
   765 /*
       
   766    First we need to determine if a particular virtual space is using large
       
    767    pages.  This is done in the initialize function, and only virtual spaces
       
   768    that are larger than LargePageSizeInBytes use large pages.  Once we
       
   769    have determined this, all expand_by and shrink_by calls must grow and
       
   770    shrink by large page size chunks.  If a particular request
       
   771    is within the current large page, the call to commit and uncommit memory
       
   772    can be ignored.  In the case that the low and high boundaries of this
       
    773    space are not large page aligned, the pages leading to the first large
       
   774    page address and the pages after the last large page address must be
       
   775    allocated with default pages.
       
   776 */
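// Editor's note (worked example under assumed 4K small pages and 2M large
// pages, not in the original source): if high() sits 1M below
// lower_high_boundary() and expand_by(3M) is requested, the lower region is
// committed for 1M with small pages and the middle region for 2M at large-page
// granularity; the code below issues those commits separately against
// lower_high() and middle_high().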
       
   777 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
       
   778   if (uncommitted_size() < bytes) return false;
       
   779 
       
   780   if (special()) {
       
   781     // don't commit memory if the entire space is pinned in memory
       
   782     _high += bytes;
       
   783     return true;
       
   784   }
       
   785 
       
   786   char* previous_high = high();
       
   787   char* unaligned_new_high = high() + bytes;
       
   788   assert(unaligned_new_high <= high_boundary(),
       
   789          "cannot expand by more than upper boundary");
       
   790 
       
   791   // Calculate where the new high for each of the regions should be.  If
       
   792   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
       
   793   // then the unaligned lower and upper new highs would be the
       
   794   // lower_high() and upper_high() respectively.
       
   795   char* unaligned_lower_new_high =
       
   796     MIN2(unaligned_new_high, lower_high_boundary());
       
   797   char* unaligned_middle_new_high =
       
   798     MIN2(unaligned_new_high, middle_high_boundary());
       
   799   char* unaligned_upper_new_high =
       
   800     MIN2(unaligned_new_high, upper_high_boundary());
       
   801 
       
    802   // Align the new highs based on each region's alignment.  Lower and upper
    803   // alignment will always be the default page size.  Middle alignment will be
       
   804   // LargePageSizeInBytes if the actual size of the virtual space is in
       
   805   // fact larger than LargePageSizeInBytes.
       
   806   char* aligned_lower_new_high =
       
   807     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
       
   808   char* aligned_middle_new_high =
       
   809     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
       
   810   char* aligned_upper_new_high =
       
   811     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
       
   812 
       
   813   // Determine which regions need to grow in this expand_by call.
       
   814   // If you are growing in the lower region, high() must be in that
       
   815   // region so calculate the size based on high().  For the middle and
       
   816   // upper regions, determine the starting point of growth based on the
       
   817   // location of high().  By getting the MAX of the region's low address
       
   818   // (or the previous region's high address) and high(), we can tell if it
       
   819   // is an intra or inter region growth.
       
   820   size_t lower_needs = 0;
       
   821   if (aligned_lower_new_high > lower_high()) {
       
   822     lower_needs =
       
   823       pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
       
   824   }
       
   825   size_t middle_needs = 0;
       
   826   if (aligned_middle_new_high > middle_high()) {
       
   827     middle_needs =
       
   828       pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
       
   829   }
       
   830   size_t upper_needs = 0;
       
   831   if (aligned_upper_new_high > upper_high()) {
       
   832     upper_needs =
       
   833       pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
       
   834   }
       
   835 
       
   836   // Check contiguity.
       
   837   assert(low_boundary() <= lower_high() &&
       
   838          lower_high() <= lower_high_boundary(),
       
   839          "high address must be contained within the region");
       
   840   assert(lower_high_boundary() <= middle_high() &&
       
   841          middle_high() <= middle_high_boundary(),
       
   842          "high address must be contained within the region");
       
   843   assert(middle_high_boundary() <= upper_high() &&
       
   844          upper_high() <= upper_high_boundary(),
       
   845          "high address must be contained within the region");
       
   846 
       
   847   // Commit regions
       
   848   if (lower_needs > 0) {
       
   849     assert(low_boundary() <= lower_high() &&
       
   850            lower_high() + lower_needs <= lower_high_boundary(),
       
   851            "must not expand beyond region");
       
   852     if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
       
   853       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
       
   854                          ", lower_needs=" SIZE_FORMAT ", %d) failed",
       
   855                          lower_high(), lower_needs, _executable);)
       
   856       return false;
       
   857     } else {
       
   858       _lower_high += lower_needs;
       
   859     }
       
   860   }
       
   861   if (middle_needs > 0) {
       
   862     assert(lower_high_boundary() <= middle_high() &&
       
   863            middle_high() + middle_needs <= middle_high_boundary(),
       
   864            "must not expand beyond region");
       
   865     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
       
   866                            _executable)) {
       
   867       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
       
   868                          ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
       
   869                          ", %d) failed", middle_high(), middle_needs,
       
   870                          middle_alignment(), _executable);)
       
   871       return false;
       
   872     }
       
   873     _middle_high += middle_needs;
       
   874   }
       
   875   if (upper_needs > 0) {
       
   876     assert(middle_high_boundary() <= upper_high() &&
       
   877            upper_high() + upper_needs <= upper_high_boundary(),
       
   878            "must not expand beyond region");
       
   879     if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
       
   880       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
       
   881                          ", upper_needs=" SIZE_FORMAT ", %d) failed",
       
   882                          upper_high(), upper_needs, _executable);)
       
   883       return false;
       
   884     } else {
       
   885       _upper_high += upper_needs;
       
   886     }
       
   887   }
       
   888 
       
   889   if (pre_touch || AlwaysPreTouch) {
       
   890     os::pretouch_memory(previous_high, unaligned_new_high);
       
   891   }
       
   892 
       
   893   _high += bytes;
       
   894   return true;
       
   895 }
       
   896 
       
    897 // A page is uncommitted if the contents of the entire page are deemed unusable.
    898 // Continue to decrement the high() pointer until it reaches a page boundary,
    899 // at which point that particular page can be uncommitted.
       
   900 void VirtualSpace::shrink_by(size_t size) {
       
   901   if (committed_size() < size)
       
   902     fatal("Cannot shrink virtual space to negative size");
       
   903 
       
   904   if (special()) {
       
   905     // don't uncommit if the entire space is pinned in memory
       
   906     _high -= size;
       
   907     return;
       
   908   }
       
   909 
       
   910   char* unaligned_new_high = high() - size;
       
   911   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
       
   912 
       
    913   // Calculate the new unaligned high for each region
       
   914   char* unaligned_upper_new_high =
       
   915     MAX2(unaligned_new_high, middle_high_boundary());
       
   916   char* unaligned_middle_new_high =
       
   917     MAX2(unaligned_new_high, lower_high_boundary());
       
   918   char* unaligned_lower_new_high =
       
   919     MAX2(unaligned_new_high, low_boundary());
       
   920 
       
   921   // Align address to region's alignment
       
   922   char* aligned_upper_new_high =
       
   923     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
       
   924   char* aligned_middle_new_high =
       
   925     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
       
   926   char* aligned_lower_new_high =
       
   927     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
       
   928 
       
   929   // Determine which regions need to shrink
       
   930   size_t upper_needs = 0;
       
   931   if (aligned_upper_new_high < upper_high()) {
       
   932     upper_needs =
       
   933       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
       
   934   }
       
   935   size_t middle_needs = 0;
       
   936   if (aligned_middle_new_high < middle_high()) {
       
   937     middle_needs =
       
   938       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
       
   939   }
       
   940   size_t lower_needs = 0;
       
   941   if (aligned_lower_new_high < lower_high()) {
       
   942     lower_needs =
       
   943       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
       
   944   }
       
   945 
       
   946   // Check contiguity.
       
   947   assert(middle_high_boundary() <= upper_high() &&
       
   948          upper_high() <= upper_high_boundary(),
       
   949          "high address must be contained within the region");
       
   950   assert(lower_high_boundary() <= middle_high() &&
       
   951          middle_high() <= middle_high_boundary(),
       
   952          "high address must be contained within the region");
       
   953   assert(low_boundary() <= lower_high() &&
       
   954          lower_high() <= lower_high_boundary(),
       
   955          "high address must be contained within the region");
       
   956 
       
   957   // Uncommit
       
   958   if (upper_needs > 0) {
       
   959     assert(middle_high_boundary() <= aligned_upper_new_high &&
       
   960            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
       
   961            "must not shrink beyond region");
       
   962     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
       
   963       debug_only(warning("os::uncommit_memory failed"));
       
   964       return;
       
   965     } else {
       
   966       _upper_high -= upper_needs;
       
   967     }
       
   968   }
       
   969   if (middle_needs > 0) {
       
   970     assert(lower_high_boundary() <= aligned_middle_new_high &&
       
   971            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
       
   972            "must not shrink beyond region");
       
   973     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
       
   974       debug_only(warning("os::uncommit_memory failed"));
       
   975       return;
       
   976     } else {
       
   977       _middle_high -= middle_needs;
       
   978     }
       
   979   }
       
   980   if (lower_needs > 0) {
       
   981     assert(low_boundary() <= aligned_lower_new_high &&
       
   982            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
       
   983            "must not shrink beyond region");
       
   984     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
       
   985       debug_only(warning("os::uncommit_memory failed"));
       
   986       return;
       
   987     } else {
       
   988       _lower_high -= lower_needs;
       
   989     }
       
   990   }
       
   991 
       
   992   _high -= size;
       
   993 }
       
   994 
       
   995 #ifndef PRODUCT
       
   996 void VirtualSpace::check_for_contiguity() {
       
   997   // Check contiguity.
       
   998   assert(low_boundary() <= lower_high() &&
       
   999          lower_high() <= lower_high_boundary(),
       
  1000          "high address must be contained within the region");
       
  1001   assert(lower_high_boundary() <= middle_high() &&
       
  1002          middle_high() <= middle_high_boundary(),
       
  1003          "high address must be contained within the region");
       
  1004   assert(middle_high_boundary() <= upper_high() &&
       
  1005          upper_high() <= upper_high_boundary(),
       
  1006          "high address must be contained within the region");
       
  1007   assert(low() >= low_boundary(), "low");
       
  1008   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
       
  1009   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
       
  1010   assert(high() <= upper_high(), "upper high");
       
  1011 }
       
  1012 
       
  1013 void VirtualSpace::print_on(outputStream* out) {
       
  1014   out->print   ("Virtual space:");
       
  1015   if (special()) out->print(" (pinned in memory)");
       
  1016   out->cr();
       
  1017   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
       
  1018   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
       
  1019   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
       
  1020   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
       
  1021 }
       
  1022 
       
  1023 void VirtualSpace::print() {
       
  1024   print_on(tty);
       
  1025 }
       
  1026 
       
  1027 /////////////// Unit tests ///////////////
       
  1028 
       
  1029 #ifndef PRODUCT
       
  1030 
       
  1031 #define test_log(...) \
       
  1032   do {\
       
  1033     if (VerboseInternalVMTests) { \
       
  1034       tty->print_cr(__VA_ARGS__); \
       
  1035       tty->flush(); \
       
  1036     }\
       
  1037   } while (false)
       
  1038 
       
  1039 class TestReservedSpace : AllStatic {
       
  1040  public:
       
  1041   static void small_page_write(void* addr, size_t size) {
       
  1042     size_t page_size = os::vm_page_size();
       
  1043 
       
  1044     char* end = (char*)addr + size;
       
  1045     for (char* p = (char*)addr; p < end; p += page_size) {
       
  1046       *p = 1;
       
  1047     }
       
  1048   }
       
  1049 
       
  1050   static void release_memory_for_test(ReservedSpace rs) {
       
  1051     if (rs.special()) {
       
  1052       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
       
  1053     } else {
       
  1054       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
       
  1055     }
       
  1056   }
       
  1057 
       
  1058   static void test_reserved_space1(size_t size, size_t alignment) {
       
  1059     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
       
  1060 
       
  1061     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
       
  1062 
       
  1063     ReservedSpace rs(size,          // size
       
  1064                      alignment,     // alignment
       
  1065                      UseLargePages, // large
       
  1066                      (char *)NULL); // requested_address
       
  1067 
       
  1068     test_log(" rs.special() == %d", rs.special());
       
  1069 
       
  1070     assert(rs.base() != NULL, "Must be");
       
  1071     assert(rs.size() == size, "Must be");
       
  1072 
       
  1073     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
       
  1074     assert(is_size_aligned(rs.size(), alignment), "aligned request sizes should give aligned reserved sizes");
       
  1075 
       
  1076     if (rs.special()) {
       
  1077       small_page_write(rs.base(), size);
       
  1078     }
       
  1079 
       
  1080     release_memory_for_test(rs);
       
  1081   }
       
  1082 
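         // Same checks as test_reserved_space1(), but through the one-argument
         // ReservedSpace(size) constructor, which chooses page size and alignment itself.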
       
  1083   static void test_reserved_space2(size_t size) {
       
  1084     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
       
  1085 
       
  1086     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
       
  1087 
       
  1088     ReservedSpace rs(size);
       
  1089 
       
  1090     test_log(" rs.special() == %d", rs.special());
       
  1091 
       
  1092     assert(rs.base() != NULL, "Must be");
       
  1093     assert(rs.size() == size, "Must be");
       
  1094 
       
  1095     if (rs.special()) {
       
  1096       small_page_write(rs.base(), size);
       
  1097     }
       
  1098 
       
  1099     release_memory_for_test(rs);
       
  1100   }
       
  1101 
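         // Like the tests above, but using the (size, alignment, large, executable)
         // constructor; large pages are only requested when UseLargePages is enabled
         // and the size is at least one large page.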
       
  1102   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
       
  1103     test_log("test_reserved_space3(%p, %p, %d)",
       
  1104         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
       
  1105 
       
  1106     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
       
  1107     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
       
  1108 
       
  1109     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
       
  1110 
       
  1111     ReservedSpace rs(size, alignment, large, /* exec */ false);
       
  1112 
       
  1113     test_log(" rs.special() == %d", rs.special());
       
  1114 
       
  1115     assert(rs.base() != NULL, "Must be");
       
  1116     assert(rs.size() == size, "Must be");
       
  1117 
       
  1118     if (rs.special()) {
       
  1119       small_page_write(rs.base(), size);
       
  1120     }
       
  1121 
       
  1122     release_memory_for_test(rs);
       
  1123   }
       
  1124 
       
  1125 
       
  1126   static void test_reserved_space1() {
       
  1127     size_t size = 2 * 1024 * 1024;
       
  1128     size_t ag   = os::vm_allocation_granularity();
       
  1129 
       
  1130     test_reserved_space1(size,      ag);
       
  1131     test_reserved_space1(size * 2,  ag);
       
  1132     test_reserved_space1(size * 10, ag);
       
  1133   }
       
  1134 
       
  1135   static void test_reserved_space2() {
       
  1136     size_t size = 2 * 1024 * 1024;
       
  1137     size_t ag = os::vm_allocation_granularity();
       
  1138 
       
  1139     test_reserved_space2(size * 1);
       
  1140     test_reserved_space2(size * 2);
       
  1141     test_reserved_space2(size * 10);
       
  1142     test_reserved_space2(ag);
       
  1143     test_reserved_space2(size - ag);
       
  1144     test_reserved_space2(size);
       
  1145     test_reserved_space2(size + ag);
       
  1146     test_reserved_space2(size * 2);
       
  1147     test_reserved_space2(size * 2 - ag);
       
  1148     test_reserved_space2(size * 2 + ag);
       
  1149     test_reserved_space2(size * 3);
       
  1150     test_reserved_space2(size * 3 - ag);
       
  1151     test_reserved_space2(size * 3 + ag);
       
  1152     test_reserved_space2(size * 10);
       
  1153     test_reserved_space2(size * 10 + size / 2);
       
  1154   }
       
  1155 
       
  1156   static void test_reserved_space3() {
       
  1157     size_t ag = os::vm_allocation_granularity();
       
  1158 
       
  1159     test_reserved_space3(ag,      ag    , false);
       
  1160     test_reserved_space3(ag * 2,  ag    , false);
       
  1161     test_reserved_space3(ag * 3,  ag    , false);
       
  1162     test_reserved_space3(ag * 2,  ag * 2, false);
       
  1163     test_reserved_space3(ag * 4,  ag * 2, false);
       
  1164     test_reserved_space3(ag * 8,  ag * 2, false);
       
  1165     test_reserved_space3(ag * 4,  ag * 4, false);
       
  1166     test_reserved_space3(ag * 8,  ag * 4, false);
       
  1167     test_reserved_space3(ag * 16, ag * 4, false);
       
  1168 
       
  1169     if (UseLargePages) {
       
  1170       size_t lp = os::large_page_size();
       
  1171 
       
  1172       // Without large pages
       
  1173       test_reserved_space3(lp,     ag * 4, false);
       
  1174       test_reserved_space3(lp * 2, ag * 4, false);
       
  1175       test_reserved_space3(lp * 4, ag * 4, false);
       
  1176       test_reserved_space3(lp,     lp    , false);
       
  1177       test_reserved_space3(lp * 2, lp    , false);
       
  1178       test_reserved_space3(lp * 3, lp    , false);
       
  1179       test_reserved_space3(lp * 2, lp * 2, false);
       
  1180       test_reserved_space3(lp * 4, lp * 2, false);
       
  1181       test_reserved_space3(lp * 8, lp * 2, false);
       
  1182 
       
  1183       // With large pages
       
  1184       test_reserved_space3(lp, ag * 4    , true);
       
  1185       test_reserved_space3(lp * 2, ag * 4, true);
       
  1186       test_reserved_space3(lp * 4, ag * 4, true);
       
  1187       test_reserved_space3(lp, lp        , true);
       
  1188       test_reserved_space3(lp * 2, lp    , true);
       
  1189       test_reserved_space3(lp * 3, lp    , true);
       
  1190       test_reserved_space3(lp * 2, lp * 2, true);
       
  1191       test_reserved_space3(lp * 4, lp * 2, true);
       
  1192       test_reserved_space3(lp * 8, lp * 2, true);
       
  1193     }
       
  1194   }
       
  1195 
       
  1196   static void test_reserved_space() {
       
  1197     test_reserved_space1();
       
  1198     test_reserved_space2();
       
  1199     test_reserved_space3();
       
  1200   }
       
  1201 };
       
  1202 
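       // Entry point invoked by the internal VM test runner (-XX:+ExecuteInternalVMTests).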
       
  1203 void TestReservedSpace_test() {
       
  1204   TestReservedSpace::test_reserved_space();
       
  1205 }
       
  1206 
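       // assert() wrappers used by the VirtualSpace tests below; err_msg() embeds the
       // two compared values in the assertion failure message.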
       
  1207 #define assert_equals(actual, expected)     \
       
  1208   assert(actual == expected,                \
       
  1209     err_msg("Got " SIZE_FORMAT " expected " \
       
  1210       SIZE_FORMAT, actual, expected));
       
  1211 
       
  1212 #define assert_ge(value1, value2)                  \
       
  1213   assert(value1 >= value2,                         \
       
  1214     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
       
  1215       #value2 "': " SIZE_FORMAT, value1, value2));
       
  1216 
       
  1217 #define assert_lt(value1, value2)                  \
       
  1218   assert(value1 < value2,                          \
       
  1219     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
       
  1220       #value2 "': " SIZE_FORMAT, value1, value2));
       
  1221 
       
  1222 
       
  1223 class TestVirtualSpace : AllStatic {
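         // Controls how the tests reserve memory and pick the commit granularity:
         //   Default, Reserve - ReservedSpace(size) plus VirtualSpace::initialize()
         //   Disable          - small-page reservation, commit granularity forced to os::vm_page_size()
         //   Commit           - small-page reservation, commit granularity derived from the region size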
       
  1224   enum TestLargePages {
       
  1225     Default,
       
  1226     Disable,
       
  1227     Reserve,
       
  1228     Commit
       
  1229   };
       
  1230 
       
  1231   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
       
  1232     switch(mode) {
       
  1233     default:
       
  1234     case Default:
       
  1235     case Reserve:
       
  1236       return ReservedSpace(reserve_size_aligned);
       
  1237     case Disable:
       
  1238     case Commit:
       
  1239       return ReservedSpace(reserve_size_aligned,
       
  1240                            os::vm_allocation_granularity(),
       
  1241                            /* large */ false, /* exec */ false);
       
  1242     }
       
  1243   }
       
  1244 
       
  1245   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
       
  1246     switch(mode) {
       
  1247     default:
       
  1248     case Default:
       
  1249     case Reserve:
       
  1250       return vs.initialize(rs, 0);
       
  1251     case Disable:
       
  1252       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
       
  1253     case Commit:
       
  1254       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
       
  1255     }
       
  1256   }
       
  1257 
       
  1258  public:
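         // Commit commit_size bytes into a reservation of (aligned) reserve_size and check
         // the reported actual_committed_size(): a special (pinned large page) space is
         // fully committed up front; otherwise the committed size must cover the request
         // without overshooting it by more than one commit granule.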
       
  1259   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
       
  1260                                                         TestLargePages mode = Default) {
       
  1261     size_t granularity = os::vm_allocation_granularity();
       
  1262     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
       
  1263 
       
  1264     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
       
  1265 
       
  1266     assert(reserved.is_reserved(), "Must be");
       
  1267 
       
  1268     VirtualSpace vs;
       
  1269     bool initialized = initialize_virtual_space(vs, reserved, mode);
       
  1270     assert(initialized, "Failed to initialize VirtualSpace");
       
  1271 
       
  1272     vs.expand_by(commit_size, false);
       
  1273 
       
  1274     if (vs.special()) {
       
  1275       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
       
  1276     } else {
       
  1277       assert_ge(vs.actual_committed_size(), commit_size);
       
  1278       // Approximate the commit granularity.
       
  1279       // Make sure that we don't commit using large pages
       
  1280       // if large pages have been disabled for this VirtualSpace.
       
  1281       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
       
  1282                                    os::vm_page_size() : os::large_page_size();
       
  1283       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
       
  1284     }
       
  1285 
       
  1286     reserved.release();
       
  1287   }
       
  1288 
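         // Reserve exactly one large page with large pages requested; after expand_by()
         // the whole page should be reported as committed.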
       
  1289   static void test_virtual_space_actual_committed_space_one_large_page() {
       
  1290     if (!UseLargePages) {
       
  1291       return;
       
  1292     }
       
  1293 
       
  1294     size_t large_page_size = os::large_page_size();
       
  1295 
       
  1296     ReservedSpace reserved(large_page_size, large_page_size, true, false);
       
  1297 
       
  1298     assert(reserved.is_reserved(), "Must be");
       
  1299 
       
  1300     VirtualSpace vs;
       
  1301     bool initialized = vs.initialize(reserved, 0);
       
  1302     assert(initialized, "Failed to initialize VirtualSpace");
       
  1303 
       
  1304     vs.expand_by(large_page_size, false);
       
  1305 
       
  1306     assert_equals(vs.actual_committed_size(), large_page_size);
       
  1307 
       
  1308     reserved.release();
       
  1309   }
       
  1310 
       
  1311   static void test_virtual_space_actual_committed_space() {
       
  1312     test_virtual_space_actual_committed_space(4 * K, 0);
       
  1313     test_virtual_space_actual_committed_space(4 * K, 4 * K);
       
  1314     test_virtual_space_actual_committed_space(8 * K, 0);
       
  1315     test_virtual_space_actual_committed_space(8 * K, 4 * K);
       
  1316     test_virtual_space_actual_committed_space(8 * K, 8 * K);
       
  1317     test_virtual_space_actual_committed_space(12 * K, 0);
       
  1318     test_virtual_space_actual_committed_space(12 * K, 4 * K);
       
  1319     test_virtual_space_actual_committed_space(12 * K, 8 * K);
       
  1320     test_virtual_space_actual_committed_space(12 * K, 12 * K);
       
  1321     test_virtual_space_actual_committed_space(64 * K, 0);
       
  1322     test_virtual_space_actual_committed_space(64 * K, 32 * K);
       
  1323     test_virtual_space_actual_committed_space(64 * K, 64 * K);
       
  1324     test_virtual_space_actual_committed_space(2 * M, 0);
       
  1325     test_virtual_space_actual_committed_space(2 * M, 4 * K);
       
  1326     test_virtual_space_actual_committed_space(2 * M, 64 * K);
       
  1327     test_virtual_space_actual_committed_space(2 * M, 1 * M);
       
  1328     test_virtual_space_actual_committed_space(2 * M, 2 * M);
       
  1329     test_virtual_space_actual_committed_space(10 * M, 0);
       
  1330     test_virtual_space_actual_committed_space(10 * M, 4 * K);
       
  1331     test_virtual_space_actual_committed_space(10 * M, 8 * K);
       
  1332     test_virtual_space_actual_committed_space(10 * M, 1 * M);
       
  1333     test_virtual_space_actual_committed_space(10 * M, 2 * M);
       
  1334     test_virtual_space_actual_committed_space(10 * M, 5 * M);
       
  1335     test_virtual_space_actual_committed_space(10 * M, 10 * M);
       
  1336   }
       
  1337 
       
  1338   static void test_virtual_space_disable_large_pages() {
       
  1339     if (!UseLargePages) {
       
  1340       return;
       
  1341     }
       
  1342     // Verify that commits use small pages when large pages are explicitly disabled for the VirtualSpace (Disable); the Reserve and Commit cases exercise the other granularity modes.
       
  1343     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
       
  1344     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
       
  1345     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
       
  1346     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
       
  1347     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
       
  1348     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
       
  1349     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
       
  1350 
       
  1351     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
       
  1352     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
       
  1353     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
       
  1354     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
       
  1355     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
       
  1356     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
       
  1357     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
       
  1358 
       
  1359     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
       
  1360     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
       
  1361     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
       
  1362     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
       
  1363     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
       
  1364     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
       
  1365     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
       
  1366   }
       
  1367 
       
  1368   static void test_virtual_space() {
       
  1369     test_virtual_space_actual_committed_space();
       
  1370     test_virtual_space_actual_committed_space_one_large_page();
       
  1371     test_virtual_space_disable_large_pages();
       
  1372   }
       
  1373 };
       
  1374 
       
  1375 void TestVirtualSpace_test() {
       
  1376   TestVirtualSpace::test_virtual_space();
       
  1377 }
       
  1378 
       
  1379 #endif // PRODUCT
       
  1380 
       
  1381 #endif