hotspot/src/share/vm/memory/virtualspace.cpp
changeset 46619 a3919f5e8d2b
parent 46618 d503911aa948
child 46620 750c6edff33b
    45   size_t alignment;
    46   if (large_pages && has_preferred_page_size) {
    47     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
    48     // ReservedSpace initialization requires size to be aligned to the given
    49     // alignment. Align the size up.
-   50     size = align_size_up(size, alignment);
+   50     size = align_up(size, alignment);
    51   } else {
    52     // Don't force the alignment to be large page aligned,
    53     // since that will waste memory.
    54     alignment = os::vm_allocation_granularity();
    55   }
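
The renamed align_up helper performs the usual power-of-two round-up. A minimal sketch of the idiom it stands for (my_align_up is a hypothetical illustration, not the HotSpot implementation):

    // Round x up to the next multiple of a, assuming a is a power of two.
    // Example: my_align_up(5000, 4096) == 8192; my_align_up(8192, 4096) == 8192.
    inline size_t my_align_up(size_t x, size_t a) {
      return (x + a - 1) & ~(a - 1);
    }
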
   170     // Check alignment constraints
   171     if ((((size_t)base) & (alignment - 1)) != 0) {
   172       // Base not aligned, retry
   173       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
   174       // Make sure that size is aligned
-  175       size = align_size_up(size, alignment);
+  175       size = align_up(size, alignment);
   176       base = os::reserve_memory_aligned(size, alignment);
   177 
   178       if (requested_address != 0 &&
   179           failed_to_reserve_as_requested(base, requested_address, size, false)) {
   180         // As a result of the alignment constraints, the allocated base differs
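
The misalignment test above masks the low bits directly; for power-of-two alignments it is equivalent to negating the is_aligned predicate this changeset introduces. A hedged equivalence sketch:

    // An address is aligned to a power-of-two boundary exactly when its
    // low log2(alignment) bits are zero, so these two tests agree:
    bool misaligned_by_mask = (((size_t)base) & (alignment - 1)) != 0;
    bool misaligned_by_api  = !is_aligned(base, alignment);
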
   225   return result;
   226 }
   227 
   228 
   229 size_t ReservedSpace::page_align_size_up(size_t size) {
-  230   return align_size_up(size, os::vm_page_size());
+  230   return align_up(size, os::vm_page_size());
   231 }
   232 
   233 
   234 size_t ReservedSpace::page_align_size_down(size_t size) {
-  235   return align_size_down(size, os::vm_page_size());
+  235   return align_down(size, os::vm_page_size());
   236 }
   237 
   238 
   239 size_t ReservedSpace::allocation_align_size_up(size_t size) {
-  240   return align_size_up(size, os::vm_allocation_granularity());
+  240   return align_up(size, os::vm_allocation_granularity());
   241 }
   242 
   243 
   244 size_t ReservedSpace::allocation_align_size_down(size_t size) {
-  245   return align_size_down(size, os::vm_allocation_granularity());
+  245   return align_down(size, os::vm_allocation_granularity());
   246 }
   247 
   248 
   249 void ReservedSpace::release() {
   250   if (is_reserved()) {
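
For a concrete feel of these helpers, assume a 4 KiB page (the real value comes from os::vm_page_size() and varies by platform):

    // page_align_size_up(5000)   == 8192  -- round up to the next page boundary
    // page_align_size_down(5000) == 4096  -- round down to the previous one
    // page_align_size_up(8192)   == 8192  -- already-aligned sizes pass through
    // The allocation_align_* pair behaves the same, except it rounds to
    // os::vm_allocation_granularity() (e.g. 64 KiB on Windows).
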
   381   // At least one is possible even for 0 sized attach range.
   382   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
   383   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
   384 
   385   const size_t stepsize = (attach_range == 0) ? // Only one try.
-  386     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
+  386     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
   387 
   388   // Try attach points from top to bottom.
   389   char* attach_point = highest_start;
   390   while (attach_point >= lowest_start  &&
   391          attach_point <= highest_start &&  // Avoid wrap around.
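
The loop above probes candidate attach points from the top of the range downward in aligned steps. A simplified, self-contained sketch of its shape (try_reserve_at is a hypothetical stand-in for the actual reservation call):

    // stepsize was rounded up to attach_point_alignment, so every probe
    // stays on an acceptable attach boundary.
    for (char* p = highest_start;
         p >= lowest_start && p <= highest_start;  // upper bound guards pointer wrap-around
         p -= stepsize) {
      if (try_reserve_at(p)) break;  // stop at the first successful reservation
    }
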
   461   const size_t os_attach_point_alignment =
   462     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
   463     NOT_AIX(os::vm_allocation_granularity());
   464   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
   465 
-  466   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
+  466   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
   467   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
   468     noaccess_prefix_size(alignment) : 0;
   469 
   470   // Attempt to alloc at user-given address.
   471   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
   490     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
   491     // Give it several tries from top of range to bottom.
   492     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
   493 
   494       // Calc address range within we try to attach (range of possible start addresses).
-  495       char* const highest_start = align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
+  495       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
-  496       char* const lowest_start  = align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
+  496       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
   497       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
   498                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
   499     }
   500 
   501     // zerobased: Attempt to allocate in the lower 32G.
   502     // But leave room for the compressed class pointers, which is allocated above
   503     // the heap.
   504     char *zerobased_max = (char *)OopEncodingHeapMax;
-  505     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
+  505     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
   506     // For small heaps, save some space for compressed class pointer
   507     // space so it can be decoded with no base.
   508     if (UseCompressedClassPointers && !UseSharedSpaces &&
   509         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
   510         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
   515     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretical possible.
   516         ((_base == NULL) ||                        // No previous try succeeded.
   517          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
   518 
   519       // Calc address range within we try to attach (range of possible start addresses).
-  520       char *const highest_start = align_ptr_down(zerobased_max - size, attach_point_alignment);
+  520       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
   521       // Need to be careful about size being guaranteed to be less
   522       // than UnscaledOopHeapMax due to type constraints.
   523       char *lowest_start = aligned_heap_base_min_address;
   524       uint64_t unscaled_end = UnscaledOopHeapMax - size;
   525       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
   526         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
   527       }
-  528       lowest_start = align_ptr_up(lowest_start, attach_point_alignment);
+  528       lowest_start = align_up(lowest_start, attach_point_alignment);
   529       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
   530                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
   531     }
   532 
   533     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
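
For orientation, the bounds used in this search have well-known defaults: UnscaledOopHeapMax is 4 GB (a 32-bit compressed oop used as a raw address), and OopEncodingHeapMax is that limit shifted by the object-alignment bits, i.e. 32 GB with the default 8-byte object alignment. A worked sketch, assuming those defaults:

    // 32-bit narrow oop, optionally shifted by log2(ObjectAlignmentInBytes):
    //   UnscaledOopHeapMax = 1ull << 32;        //  4 GB: address == oop
    //   OopEncodingHeapMax = 1ull << (32 + 3);  // 32 GB: address == oop << 3 (zero base)
    // A heap that ends above 32 GB cannot be decoded with a zero base, which
    // is why the code then falls back to base != 0 plus a noaccess prefix.
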
   560   if (size == 0) {
   561     return;
   562   }
   563 
   564   // Heap size should be aligned to alignment, too.
-  565   guarantee(is_size_aligned(size, alignment), "set by caller");
+  565   guarantee(is_aligned(size, alignment), "set by caller");
   566 
   567   if (UseCompressedOops) {
   568     initialize_compressed_heap(size, alignment, large);
   569     if (_size > size) {
   570       // We allocated heap with noaccess prefix.
   749 bool VirtualSpace::contains(const void* p) const {
   750   return low() <= (const char*) p && (const char*) p < high();
   751 }
   752 
   753 static void pretouch_expanded_memory(void* start, void* end) {
-  754   assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
+  754   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
-  755   assert(is_ptr_aligned(end,   os::vm_page_size()), "Unexpected alignment");
+  755   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
   756 
   757   os::pretouch_memory(start, end);
   758 }
   759 
   760 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
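
Pre-touching walks the committed range and writes to each page so the OS assigns physical backing immediately rather than at first use. A minimal sketch of the idea behind os::pretouch_memory (sketch_pretouch and its explicit page_size parameter are illustrative assumptions, not the real signature):

    void sketch_pretouch(char* start, char* end, size_t page_size) {
      for (char* p = start; p < end; p += page_size) {
        *(volatile char*)p = 0;  // one write per page forces it to be faulted in
      }
    }
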
  1035   }
  1036 
  1037   static void test_reserved_space1(size_t size, size_t alignment) {
  1038     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
  1039 
- 1040     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
+ 1040     assert(is_aligned(size, alignment), "Incorrect input parameters");
  1041 
  1042     ReservedSpace rs(size,          // size
  1043                      alignment,     // alignment
  1044                      UseLargePages, // large
  1045                      (char *)NULL); // requested_address
  1047     test_log(" rs.special() == %d", rs.special());
  1048 
  1049     assert(rs.base() != NULL, "Must be");
  1050     assert(rs.size() == size, "Must be");
  1051 
- 1052     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
+ 1052     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
- 1053     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
+ 1053     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
  1054 
  1055     if (rs.special()) {
  1056       small_page_write(rs.base(), size);
  1057     }
  1058 
  1060   }
  1061 
  1062   static void test_reserved_space2(size_t size) {
  1063     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
  1064 
- 1065     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+ 1065     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
  1066 
  1067     ReservedSpace rs(size);
  1068 
  1069     test_log(" rs.special() == %d", rs.special());
  1070 
  1086       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
  1087       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
  1088       return;
  1089     }
  1090 
- 1091     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+ 1091     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
- 1092     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
+ 1092     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
  1093 
  1094     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
  1095 
  1096     ReservedSpace rs(size, alignment, large, false);
  1097 
  1242 
  1243  public:
  1244   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
  1245                                                         TestLargePages mode = Default) {
  1246     size_t granularity = os::vm_allocation_granularity();
- 1247     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
+ 1247     size_t reserve_size_aligned = align_up(reserve_size, granularity);
  1248 
  1249     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
  1250 
  1251     assert(reserved.is_reserved(), "Must be");
  1252 