hotspot/src/share/vm/runtime/virtualspace.cpp
changeset 30386 07146758775b
parent 30385 1dbddb2a1971
parent 30381 098f4aed1a53
child 30397 8cd4c3d60f97
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) {
  // Want to use large pages where possible and pad with small pages.
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address) {
  initialize(size, alignment, large, requested_address, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, executable);
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // Failed; try to reserve regular memory below.
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned.
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
}


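// Note: the constructor below wraps an already-reserved range; it only records
// base, size and flags and does not reserve any memory itself.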
ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _alignment = 0;
    _special = false;
    _executable = false;
  }
}

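// The noaccess prefix is a protected region placed directly below the compressed-oop
// heap when the heap base is not NULL. Accesses that result from decoding a NULL
// narrow oop then fault in this protected page, so the VM can keep using implicit
// null checks (see the assert on Universe::narrow_oop_use_implicit_null_checks()
// below). Its size is the lcm of page size and heap alignment so that it covers
// whole pages and the shifted heap base stays aligned.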
static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}

void ReservedHeapSpace::establish_noaccess_prefix() {
  assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
  _noaccess_prefix = noaccess_prefix_size(_alignment);

  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
      // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on Windows).
      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
        fatal("cannot protect protection page");
      }
      if (PrintCompressedOopsMode) {
        tty->cr();
        tty->print_cr("Protected page at the reserved heap base: "
                      PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
      }
      assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
    } else {
      Universe::set_narrow_oop_use_implicit_null_checks(false);
    }
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (PrintCompressedOopsMode && Verbose) {
    tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " PTR_FORMAT ".\n",
               requested_address, (address)size);
  }

  if (special) {
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                     PTR_FORMAT " alignment: " PTR_FORMAT,
                     base, (void*)(uintptr_t)alignment));
      _special = true;
    }
  }

  if (base == NULL) {
    // Failed; try to reserve regular memory below.
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      if (PrintCompressedOopsMode) {
        tty->cr();
        tty->print_cr("Reserve regular memory without large pages.");
      }
    }

    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }
  }
  if (base == NULL) { return; }

  // Done
  _base = base;
  _size = size;
  _alignment = alignment;

  // Check alignment constraints.
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.
    release();
  }
}

void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          bool large) {
  const size_t attach_range = highest_start - lowest_start;
  // Cap the number of attempts at the possible number.
  // At least one attempt is possible even for a zero-sized attach range.
  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

  const size_t stepsize = (attach_range == 0) ? // Only one try.
    (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);

  // Try attach points from top to bottom.
  char* attach_point = highest_start;
  while (attach_point >= lowest_start  &&
         attach_point <= highest_start &&  // Avoid wrap around.
         ((_base == NULL) ||
          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
    try_reserve_heap(size, alignment, large, attach_point);
    attach_point -= stepsize;
  }
}

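// The attach points below are multiples of OopEncodingHeapMax (32 GB with the
// default 8-byte object alignment). The intent is that the bits of such a heap
// base and the bits of any shifted narrow oop do not overlap, i.e. the base is
// "disjoint" from the oop offset (cf. Universe::is_disjoint_heap_base_address()).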
#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// NULL terminated.
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };

  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress.
  // This assumes the array is sorted.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}

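// initialize_compressed_heap() tries heap placements in order of decreasing
// benefit for compressed oop decoding:
//  1. below UnscaledOopHeapMax (4 GB): oops can be used without base or shift,
//  2. below OopEncodingHeapMax (32 GB): zero base, shift only,
//  3. at a "disjoint" base aligned to OopEncodingHeapMax, with noaccess prefix,
//  4. anywhere, with base, shift and noaccess prefix.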
void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "can not allocate compressed oop heap for this size");
  guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
  assert(HeapBaseMinAddress > 0, "sanity");

  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  const size_t os_attach_point_alignment =
    AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
    NOT_AIX(os::vm_allocation_granularity());
  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
    noaccess_prefix_size(alignment) : 0;

  // Attempt to allocate at the user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
    try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
      release();
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (_base == NULL) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

      // Calc address range within which we try to attach (range of possible start addresses).
      char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class pointers, which is allocated above
    // the heap.
    char *zerobased_max = (char *)OopEncodingHeapMax;
    const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
    // For small heaps, save some space for compressed class pointer
    // space so it can be decoded with no base.
    if (UseCompressedClassPointers && !UseSharedSpaces &&
        OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
        (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
      zerobased_max = (char *)OopEncodingHeapMax - class_space;
    }

    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
        ((_base == NULL) ||                        // No previous try succeeded.
         (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.

      // Calc address range within which we try to attach (range of possible start addresses).
      char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful about size being guaranteed to be less
      // than UnscaledOopHeapMax due to type constraints.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, zerobased_max, size, alignment, large);
    }

    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size(alignment);

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while (addresses[i] &&                                 // End of array not yet reached.
           ((_base == NULL) ||                             // No previous try succeeded.
            (_base + size > (char *)OopEncodingHeapMax &&  // Not zerobased or unscaled address.
             !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == NULL) {
      if (PrintCompressedOopsMode && Verbose) {
        tty->print("Trying to allocate at address NULL heap of size " PTR_FORMAT ".\n", (address)size + noaccess_prefix);
      }
      initialize(size + noaccess_prefix, alignment, large, NULL, false);
    }
  }
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_size_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
    initialize_compressed_heap(size, alignment, large);
    if (_size > size) {
      // We allocated the heap with a noaccess prefix.
      // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
      // if we had to try at an arbitrary address.
      establish_noaccess_prefix();
    }
  } else {
    initialize(size, alignment, large, NULL, false);
  }

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

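// The reserved range is managed as three consecutive regions: a lower and an
// upper region at the unaligned ends, committed with the default page size,
// and a middle region committed with max_commit_granularity (typically the
// large page size for large spaces). expand_by() and shrink_by() below commit
// and uncommit per region accordingly.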
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // Allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // Commit to initial size.
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // Don't commit memory if the entire space is pinned in memory.
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    os::pretouch_memory(previous_high, unaligned_new_high);
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // Don't uncommit if the entire space is pinned in memory.
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}

  1018 /////////////// Unit tests ///////////////
       
  1019 
       
  1020 #ifndef PRODUCT
       
  1021 
       
  1022 #define test_log(...) \
       
  1023   do {\
       
  1024     if (VerboseInternalVMTests) { \
       
  1025       tty->print_cr(__VA_ARGS__); \
       
  1026       tty->flush(); \
       
  1027     }\
       
  1028   } while (false)
       
  1029 
       
  1030 class TestReservedSpace : AllStatic {
       
  1031  public:
       
  1032   static void small_page_write(void* addr, size_t size) {
       
  1033     size_t page_size = os::vm_page_size();
       
  1034 
       
  1035     char* end = (char*)addr + size;
       
  1036     for (char* p = (char*)addr; p < end; p += page_size) {
       
  1037       *p = 1;
       
  1038     }
       
  1039   }
       
  1040 
       
  1041   static void release_memory_for_test(ReservedSpace rs) {
       
  1042     if (rs.special()) {
       
  1043       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
       
  1044     } else {
       
  1045       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
       
  1046     }
       
  1047   }
       
  1048 
       
  1049   static void test_reserved_space1(size_t size, size_t alignment) {
       
  1050     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
       
  1051 
       
  1052     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
       
  1053 
       
  1054     ReservedSpace rs(size,          // size
       
  1055                      alignment,     // alignment
       
  1056                      UseLargePages, // large
       
  1057                      (char *)NULL); // requested_address
       
  1058 
       
  1059     test_log(" rs.special() == %d", rs.special());
       
  1060 
       
  1061     assert(rs.base() != NULL, "Must be");
       
  1062     assert(rs.size() == size, "Must be");
       
  1063 
       
  1064     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
       
  1065     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
       
  1066 
       
  1067     if (rs.special()) {
       
  1068       small_page_write(rs.base(), size);
       
  1069     }
       
  1070 
       
  1071     release_memory_for_test(rs);
       
  1072   }
       
  1073 
       
  1074   static void test_reserved_space2(size_t size) {
       
  1075     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
       
  1076 
       
  1077     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
       
  1078 
       
  1079     ReservedSpace rs(size);
       
  1080 
       
  1081     test_log(" rs.special() == %d", rs.special());
       
  1082 
       
  1083     assert(rs.base() != NULL, "Must be");
       
  1084     assert(rs.size() == size, "Must be");
       
  1085 
       
  1086     if (rs.special()) {
       
  1087       small_page_write(rs.base(), size);
       
  1088     }
       
  1089 
       
  1090     release_memory_for_test(rs);
       
  1091   }
       
  1092 
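         // Reserve with an explicit alignment and, optionally, large pages.
         // Large pages are only requested when they are enabled and the size
         // is at least one large page.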
       
  1093   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
       
  1094     test_log("test_reserved_space3(%p, %p, %d)",
       
  1095         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
       
  1096 
       
  1097     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
       
  1098     assert(is_size_aligned(size, alignment), "Size must be aligned to the given alignment");
       
  1099 
       
  1100     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
       
  1101 
       
  1102     ReservedSpace rs(size, alignment, large, /* executable */ false);
       
  1103 
       
  1104     test_log(" rs.special() == %d", rs.special());
       
  1105 
       
  1106     assert(rs.base() != NULL, "Must be");
       
  1107     assert(rs.size() == size, "Must be");
       
  1108 
       
  1109     if (rs.special()) {
       
  1110       small_page_write(rs.base(), size);
       
  1111     }
       
  1112 
       
  1113     release_memory_for_test(rs);
       
  1114   }
       
  1115 
       
  1116 
       
  1117   static void test_reserved_space1() {
       
  1118     size_t size = 2 * 1024 * 1024;
       
  1119     size_t ag   = os::vm_allocation_granularity();
       
  1120 
       
  1121     test_reserved_space1(size,      ag);
       
  1122     test_reserved_space1(size * 2,  ag);
       
  1123     test_reserved_space1(size * 10, ag);
       
  1124   }
       
  1125 
       
  1126   static void test_reserved_space2() {
       
  1127     size_t size = 2 * 1024 * 1024;
       
  1128     size_t ag = os::vm_allocation_granularity();
       
  1129 
       
  1130     test_reserved_space2(size * 1);
       
  1131     test_reserved_space2(size * 2);
       
  1132     test_reserved_space2(size * 10);
       
  1133     test_reserved_space2(ag);
       
  1134     test_reserved_space2(size - ag);
       
  1135     test_reserved_space2(size);
       
  1136     test_reserved_space2(size + ag);
       
  1137     test_reserved_space2(size * 2);
       
  1138     test_reserved_space2(size * 2 - ag);
       
  1139     test_reserved_space2(size * 2 + ag);
       
  1140     test_reserved_space2(size * 3);
       
  1141     test_reserved_space2(size * 3 - ag);
       
  1142     test_reserved_space2(size * 3 + ag);
       
  1143     test_reserved_space2(size * 10);
       
  1144     test_reserved_space2(size * 10 + size / 2);
       
  1145   }
       
  1146 
       
  1147   static void test_reserved_space3() {
       
  1148     size_t ag = os::vm_allocation_granularity();
       
  1149 
       
  1150     test_reserved_space3(ag,      ag    , false);
       
  1151     test_reserved_space3(ag * 2,  ag    , false);
       
  1152     test_reserved_space3(ag * 3,  ag    , false);
       
  1153     test_reserved_space3(ag * 2,  ag * 2, false);
       
  1154     test_reserved_space3(ag * 4,  ag * 2, false);
       
  1155     test_reserved_space3(ag * 8,  ag * 2, false);
       
  1156     test_reserved_space3(ag * 4,  ag * 4, false);
       
  1157     test_reserved_space3(ag * 8,  ag * 4, false);
       
  1158     test_reserved_space3(ag * 16, ag * 4, false);
       
  1159 
       
  1160     if (UseLargePages) {
       
  1161       size_t lp = os::large_page_size();
       
  1162 
       
  1163       // Without large pages
       
  1164       test_reserved_space3(lp,     ag * 4, false);
       
  1165       test_reserved_space3(lp * 2, ag * 4, false);
       
  1166       test_reserved_space3(lp * 4, ag * 4, false);
       
  1167       test_reserved_space3(lp,     lp    , false);
       
  1168       test_reserved_space3(lp * 2, lp    , false);
       
  1169       test_reserved_space3(lp * 3, lp    , false);
       
  1170       test_reserved_space3(lp * 2, lp * 2, false);
       
  1171       test_reserved_space3(lp * 4, lp * 2, false);
       
  1172       test_reserved_space3(lp * 8, lp * 2, false);
       
  1173 
       
  1174       // With large pages
       
  1175       test_reserved_space3(lp,     ag * 4, true);
       
  1176       test_reserved_space3(lp * 2, ag * 4, true);
       
  1177       test_reserved_space3(lp * 4, ag * 4, true);
       
  1178       test_reserved_space3(lp,     lp    , true);
       
  1179       test_reserved_space3(lp * 2, lp    , true);
       
  1180       test_reserved_space3(lp * 3, lp    , true);
       
  1181       test_reserved_space3(lp * 2, lp * 2, true);
       
  1182       test_reserved_space3(lp * 4, lp * 2, true);
       
  1183       test_reserved_space3(lp * 8, lp * 2, true);
       
  1184     }
       
  1185   }
       
  1186 
       
  1187   static void test_reserved_space() {
       
  1188     test_reserved_space1();
       
  1189     test_reserved_space2();
       
  1190     test_reserved_space3();
       
  1191   }
       
  1192 };
       
  1193 
       
  1194 void TestReservedSpace_test() {
       
  1195   TestReservedSpace::test_reserved_space();
       
  1196 }
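       // Entry point for the internal VM test runner (run a non-product build
       // with -XX:+ExecuteInternalVMTests to exercise it).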
       
  1197 
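       // Assertion helpers for the VirtualSpace tests below. The operands are
       // formatted with SIZE_FORMAT, so they are expected to be size_t values;
       // each operand is also echoed in the failure message.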
       
  1198 #define assert_equals(actual, expected)     \
       
  1199   assert(actual == expected,                \
       
  1200     err_msg("Got " SIZE_FORMAT " expected " \
       
  1201       SIZE_FORMAT, actual, expected));
       
  1202 
       
  1203 #define assert_ge(value1, value2)                  \
       
  1204   assert(value1 >= value2,                         \
       
  1205     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
       
  1206       #value2 "': " SIZE_FORMAT, value1, value2));
       
  1207 
       
  1208 #define assert_lt(value1, value2)                  \
       
  1209   assert(value1 < value2,                          \
       
  1210     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
       
  1211       #value2 "': " SIZE_FORMAT, value1, value2));
       
  1212 
       
  1213 
       
  1214 class TestVirtualSpace : AllStatic {
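         // Controls how reserve_memory() and initialize_virtual_space() below
         // set up the space under test:
         //   Default/Reserve - single-argument ReservedSpace, default initialize()
         //   Disable         - small-page reservation, commit granularity forced
         //                     to os::vm_page_size()
         //   Commit          - small-page reservation, commit granularity chosen
         //                     by os::page_size_for_region_unaligned()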
       
  1215   enum TestLargePages {
       
  1216     Default,
       
  1217     Disable,
       
  1218     Reserve,
       
  1219     Commit
       
  1220   };
       
  1221 
       
  1222   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
       
  1223     switch(mode) {
       
  1224     default:
       
  1225     case Default:
       
  1226     case Reserve:
       
  1227       return ReservedSpace(reserve_size_aligned);
       
  1228     case Disable:
       
  1229     case Commit:
       
  1230       return ReservedSpace(reserve_size_aligned,
       
  1231                            os::vm_allocation_granularity(),
       
  1232                            /* large */ false, /* exec */ false);
       
  1233     }
       
  1234   }
       
  1235 
       
  1236   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
       
  1237     switch(mode) {
       
  1238     default:
       
  1239     case Default:
       
  1240     case Reserve:
       
  1241       return vs.initialize(rs, 0);
       
  1242     case Disable:
       
  1243       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
       
  1244     case Commit:
       
  1245       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
       
  1246     }
       
  1247   }
       
  1248 
       
  1249  public:
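         // Commit commit_size bytes into a reserve_size reservation and check
         // actual_committed_size(): it must equal the whole reservation when the
         // space is special (pre-committed), and otherwise lie within one commit
         // granule above the requested commit size.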
       
  1250   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
       
  1251                                                         TestLargePages mode = Default) {
       
  1252     size_t granularity = os::vm_allocation_granularity();
       
  1253     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
       
  1254 
       
  1255     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
       
  1256 
       
  1257     assert(reserved.is_reserved(), "Must be");
       
  1258 
       
  1259     VirtualSpace vs;
       
  1260     bool initialized = initialize_virtual_space(vs, reserved, mode);
       
  1261     assert(initialized, "Failed to initialize VirtualSpace");
       
  1262 
       
  1263     vs.expand_by(commit_size, false);
       
  1264 
       
  1265     if (vs.special()) {
       
  1266       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
       
  1267     } else {
       
  1268       assert_ge(vs.actual_committed_size(), commit_size);
       
  1269       // Approximate the commit granularity.
       
  1270       // Make sure that we don't commit using large pages
       
  1271       // if large pages have been disabled for this VirtualSpace.
       
  1272       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
       
  1273                                    os::vm_page_size() : os::large_page_size();
       
  1274       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
       
  1275     }
       
  1276 
       
  1277     reserved.release();
       
  1278   }
       
  1279 
       
  1280   static void test_virtual_space_actual_committed_space_one_large_page() {
       
  1281     if (!UseLargePages) {
       
  1282       return;
       
  1283     }
       
  1284 
       
  1285     size_t large_page_size = os::large_page_size();
       
  1286 
       
  1287     ReservedSpace reserved(large_page_size, large_page_size, /* large */ true, /* executable */ false);
       
  1288 
       
  1289     assert(reserved.is_reserved(), "Must be");
       
  1290 
       
  1291     VirtualSpace vs;
       
  1292     bool initialized = vs.initialize(reserved, 0);
       
  1293     assert(initialized, "Failed to initialize VirtualSpace");
       
  1294 
       
  1295     vs.expand_by(large_page_size, false);
       
  1296 
       
  1297     assert_equals(vs.actual_committed_size(), large_page_size);
       
  1298 
       
  1299     reserved.release();
       
  1300   }
       
  1301 
       
  1302   static void test_virtual_space_actual_committed_space() {
       
  1303     test_virtual_space_actual_committed_space(4 * K, 0);
       
  1304     test_virtual_space_actual_committed_space(4 * K, 4 * K);
       
  1305     test_virtual_space_actual_committed_space(8 * K, 0);
       
  1306     test_virtual_space_actual_committed_space(8 * K, 4 * K);
       
  1307     test_virtual_space_actual_committed_space(8 * K, 8 * K);
       
  1308     test_virtual_space_actual_committed_space(12 * K, 0);
       
  1309     test_virtual_space_actual_committed_space(12 * K, 4 * K);
       
  1310     test_virtual_space_actual_committed_space(12 * K, 8 * K);
       
  1311     test_virtual_space_actual_committed_space(12 * K, 12 * K);
       
  1312     test_virtual_space_actual_committed_space(64 * K, 0);
       
  1313     test_virtual_space_actual_committed_space(64 * K, 32 * K);
       
  1314     test_virtual_space_actual_committed_space(64 * K, 64 * K);
       
  1315     test_virtual_space_actual_committed_space(2 * M, 0);
       
  1316     test_virtual_space_actual_committed_space(2 * M, 4 * K);
       
  1317     test_virtual_space_actual_committed_space(2 * M, 64 * K);
       
  1318     test_virtual_space_actual_committed_space(2 * M, 1 * M);
       
  1319     test_virtual_space_actual_committed_space(2 * M, 2 * M);
       
  1320     test_virtual_space_actual_committed_space(10 * M, 0);
       
  1321     test_virtual_space_actual_committed_space(10 * M, 4 * K);
       
  1322     test_virtual_space_actual_committed_space(10 * M, 8 * K);
       
  1323     test_virtual_space_actual_committed_space(10 * M, 1 * M);
       
  1324     test_virtual_space_actual_committed_space(10 * M, 2 * M);
       
  1325     test_virtual_space_actual_committed_space(10 * M, 5 * M);
       
  1326     test_virtual_space_actual_committed_space(10 * M, 10 * M);
       
  1327   }
       
  1328 
       
  1329   static void test_virtual_space_disable_large_pages() {
       
  1330     if (!UseLargePages) {
       
  1331       return;
       
  1332     }
       
  1333     // These test cases verify that committing is done with small pages if we force VirtualSpace to disable large pages.
       
  1334     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
       
  1335     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
       
  1336     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
       
  1337     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
       
  1338     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
       
  1339     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
       
  1340     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
       
  1341 
       
  1342     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
       
  1343     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
       
  1344     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
       
  1345     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
       
  1346     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
       
  1347     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
       
  1348     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
       
  1349 
       
  1350     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
       
  1351     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
       
  1352     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
       
  1353     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
       
  1354     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
       
  1355     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
       
  1356     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
       
  1357   }
       
  1358 
       
  1359   static void test_virtual_space() {
       
  1360     test_virtual_space_actual_committed_space();
       
  1361     test_virtual_space_actual_committed_space_one_large_page();
       
  1362     test_virtual_space_disable_large_pages();
       
  1363   }
       
  1364 };
       
  1365 
       
  1366 void TestVirtualSpace_test() {
       
  1367   TestVirtualSpace::test_virtual_space();
       
  1368 }
       
  1369 
       
  1370 #endif // PRODUCT
       
  1371 
       
  1372 #endif