hotspot/src/os/linux/vm/os_linux.cpp
changeset 46619 a3919f5e8d2b
parent 46618 d503911aa948
child 46625 edefffab74e2
equal deleted inserted replaced
46618:d503911aa948 46619:a3919f5e8d2b
   576   size_t size;
   577   volatile char *p;
   578 
   579   // Adjust bottom to point to the largest address within the same page, it
   580   // gives us a one-page buffer if alloca() allocates slightly more memory.
   581   bottom = (address)align_size_down((uintptr_t)bottom, os::Linux::page_size());
   581   bottom = (address)align_down((uintptr_t)bottom, os::Linux::page_size());
   582   bottom += os::Linux::page_size() - 1;
   583 
   584   // sp might be slightly above current stack pointer; if that's the case, we
   585   // will alloca() a little more space than necessary, which is OK. Don't use
   586   // os::current_stack_pointer(), as its result can be slightly below current
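This hunk only renames align_size_down to align_down; the rest of the changeset does the same for align_size_up, align_ptr_up, align_ptr_down, is_size_aligned and is_ptr_aligned, which all collapse into align_up, align_down and is_aligned. The sketch below shows what such power-of-two alignment helpers typically compute; it is an illustration only, not the HotSpot implementation of these helpers.

    // Illustration only (assumption: 'alignment' is a power of two, as in HotSpot).
    #include <cstdint>

    static inline uintptr_t align_down(uintptr_t value, uintptr_t alignment) {
      return value & ~(alignment - 1);                     // clear the low bits
    }

    static inline uintptr_t align_up(uintptr_t value, uintptr_t alignment) {
      return align_down(value + alignment - 1, alignment); // round to next boundary
    }

    static inline bool is_aligned(uintptr_t value, uintptr_t alignment) {
      return (value & (alignment - 1)) == 0;
    }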
   713   // of zero due to overflow. Don't add the guard page in that case.
   714   size_t guard_size = os::Linux::default_guard_size(thr_type);
   715   if (stack_size <= SIZE_MAX - guard_size) {
   716     stack_size += guard_size;
   717   }
   718   assert(is_size_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");
   718   assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");
   719 
   720   int status = pthread_attr_setstacksize(&attr, stack_size);
   721   assert_status(status == 0, status, "pthread_attr_setstacksize");
   722 
   723   // Configure glibc guard page.
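The check above only grows the requested stack size by the glibc guard size when the addition cannot wrap around; the comment that follows introduces the guard-page configuration itself. A minimal sketch of the pthread attribute calls involved, using a hypothetical configure_stack helper and a caller-supplied guard size rather than the VM's default_guard_size logic:

    #include <pthread.h>
    #include <stddef.h>
    #include <stdint.h>

    // Sketch only: size the stack, then tell glibc how large a guard region to
    // reserve below it. Real code also has to respect PTHREAD_STACK_MIN.
    static int configure_stack(pthread_attr_t* attr, size_t stack_size, size_t guard_size) {
      if (stack_size <= SIZE_MAX - guard_size) {
        stack_size += guard_size;            // keep the usable part at full size
      }
      int status = pthread_attr_setstacksize(attr, stack_size);
      if (status != 0) {
        return status;
      }
      return pthread_attr_setguardsize(attr, guard_size);
    }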
  1099     stack_top = stack_start;
  1100     stack_size -= 16 * page_size();
  1101   }
  1102 
  1103   // stack_top could be partially down the page so align it
  1104   stack_top = align_size_up(stack_top, page_size());
  1104   stack_top = align_up(stack_top, page_size());
  1105 
  1106   // Allowed stack value is minimum of max_size and what we derived from rlimit
  1107   if (max_size > 0) {
  1108     _initial_thread_stack_size = MIN2(max_size, stack_size);
  1109   } else {
  1110     // Accept the rlimit max, but if stack is unlimited then it will be huge, so
  1111     // clamp it at 8MB as we do on Solaris
  1112     _initial_thread_stack_size = MIN2(stack_size, 8*M);
  1113   }
  1114   _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
  1114   _initial_thread_stack_size = align_down(_initial_thread_stack_size, page_size());
  1115   _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
  1116 
  1117   assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");
  1118 
  1119   if (log_is_enabled(Info, os, thread)) {
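The block above accepts the stack size derived from the rlimit, clamps the unlimited case to 8MB, and aligns the result down to a page boundary before computing the stack bottom. A simplified, self-contained sketch of those steps, using a hypothetical initial_stack_size_guess helper rather than the VM's capture logic:

    #include <sys/resource.h>
    #include <unistd.h>
    #include <stddef.h>

    // Sketch only: query RLIMIT_STACK, clamp a huge or unlimited value to 8MB,
    // and align the result down to the page size, roughly as the code above does.
    static size_t initial_stack_size_guess() {
      const size_t page = (size_t)sysconf(_SC_PAGESIZE);
      const size_t eight_mb = 8 * 1024 * 1024;
      struct rlimit rlim;
      size_t size = eight_mb;
      if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur != RLIM_INFINITY) {
        size = (size_t)rlim.rlim_cur;
      }
      if (size > eight_mb) {
        size = eight_mb;                     // the "clamp it at 8MB" case
      }
      return size & ~(page - 1);             // align down to a page boundary
    }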
  3168       if (start != req_addr) {
  3169         ::munmap(start, extra_size);
  3170         start = NULL;
  3171       }
  3172     } else {
  3173       char* const start_aligned = align_ptr_up(start, alignment);
  3173       char* const start_aligned = align_up(start, alignment);
  3174       char* const end_aligned = start_aligned + bytes;
  3175       char* const end = start + extra_size;
  3176       if (start_aligned > start) {
  3177         ::munmap(start, start_aligned - start);
  3178       }
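This hunk is part of the over-reserve-and-trim technique: the anonymous mapping is larger than needed, start is rounded up to the requested alignment, and the unaligned head (and, in the code that follows this excerpt, the tail) is unmapped again. A stand-alone sketch of the same idea with a hypothetical reserve_aligned helper:

    #include <sys/mman.h>
    #include <stdint.h>
    #include <stddef.h>

    // Sketch: reserve bytes + alignment, then unmap the slack on both sides so the
    // surviving range starts on an 'alignment' boundary. Assumes alignment is a
    // power of two and a multiple of the page size.
    static char* reserve_aligned(size_t bytes, size_t alignment) {
      const size_t extra = bytes + alignment;
      char* start = (char*)mmap(NULL, extra, PROT_NONE,
                                MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (start == MAP_FAILED) {
        return NULL;
      }
      char* aligned = (char*)(((uintptr_t)start + alignment - 1) & ~((uintptr_t)alignment - 1));
      if (aligned > start) {
        munmap(start, (size_t)(aligned - start));                    // trim the head
      }
      char* end = start + extra;
      if (aligned + bytes < end) {
        munmap(aligned + bytes, (size_t)(end - (aligned + bytes)));  // trim the tail
      }
      return aligned;
    }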
  3198   return anon_munmap(addr, size);
  3199 }
  3200 
  3201 static bool linux_mprotect(char* addr, size_t size, int prot) {
  3202   // Linux wants the mprotect address argument to be page aligned.
  3203   char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());
  3203   char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());
  3204 
  3205   // According to SUSv3, mprotect() should only be used with mappings
  3206   // established by mmap(), and mmap() always maps whole pages. Unaligned
  3207   // 'addr' likely indicates problem in the VM (e.g. trying to change
  3208   // protection of malloc'ed or statically allocated memory). Check the
  3209   // caller if you hit this assert.
  3210   assert(addr == bottom, "sanity check");
  3211 
  3212   size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
  3212   size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
  3213   return ::mprotect(bottom, size, prot) == 0;
  3214 }
  3215 
  3216 // Set protections specified
  3217 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
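linux_mprotect above asserts that callers pass a page-aligned address and only rounds the length up to whole pages before calling mprotect. A small usage sketch of protecting and then reopening one page (illustrative, not the VM's protect_memory):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <string.h>

    // Sketch: make one freshly mapped page read-only, then writable again.
    // mprotect() requires a page-aligned address, which mmap() guarantees.
    static int guard_page_demo() {
      const size_t page = (size_t)sysconf(_SC_PAGESIZE);
      char* p = (char*)mmap(NULL, page, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) {
        return -1;
      }
      memset(p, 0, page);                           // touch it while writable
      if (mprotect(p, page, PROT_READ) != 0) {      // now reads succeed, writes fault
        munmap(p, page);
        return -1;
      }
      mprotect(p, page, PROT_READ | PROT_WRITE);    // reopen before unmapping
      munmap(p, page);
      return 0;
    }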
  3242   bool result = false;
  3243   void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
  3244                  MAP_ANONYMOUS|MAP_PRIVATE,
  3245                  -1, 0);
  3246   if (p != MAP_FAILED) {
  3247     void *aligned_p = align_ptr_up(p, page_size);
  3247     void *aligned_p = align_up(p, page_size);
  3248 
  3249     result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
  3250 
  3251     munmap(p, page_size * 2);
  3252   }
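The probe above maps two small pages so that a page-aligned address can be handed to madvise; if MADV_HUGEPAGE is accepted, transparent huge pages are usable. A reservation can then hint for THP backing the same way, sketched here with a hypothetical reserve_with_thp_hint helper:

    #include <sys/mman.h>
    #include <stddef.h>

    // Sketch: reserve a region and hint that the kernel may back it with
    // transparent huge pages. The hint is advisory; failure is not fatal.
    static char* reserve_with_thp_hint(size_t bytes) {
      char* p = (char*)mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) {
        return NULL;
      }
    #ifdef MADV_HUGEPAGE
      (void)madvise(p, bytes, MADV_HUGEPAGE);
    #endif
      return p;
    }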
  3485     int err = errno;                               \
  3486     shm_warning_format(str " (error = %d)", err);  \
  3487   } while (0)
  3488 
  3489 static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
  3490   assert(is_size_aligned(bytes, alignment), "Must be divisible by the alignment");
  3490   assert(is_aligned(bytes, alignment), "Must be divisible by the alignment");
  3491 
  3492   if (!is_size_aligned(alignment, SHMLBA)) {
  3492   if (!is_aligned(alignment, SHMLBA)) {
  3493     assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
  3494     return NULL;
  3495   }
  3496 
  3497   // To ensure that we get 'alignment' aligned memory from shmat,
  3523 
  3524   return addr;
  3525 }
  3526 
  3527 static char* shmat_at_address(int shmid, char* req_addr) {
  3528   if (!is_ptr_aligned(req_addr, SHMLBA)) {
  3528   if (!is_aligned(req_addr, SHMLBA)) {
  3529     assert(false, "Requested address needs to be SHMLBA aligned");
  3530     return NULL;
  3531   }
  3532 
  3533   char* addr = (char*)shmat(shmid, req_addr, 0);
  3541 }
  3542 
  3543 static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
  3544   // If a req_addr has been provided, we assume that the caller has already aligned the address.
  3545   if (req_addr != NULL) {
  3546     assert(is_ptr_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
  3546     assert(is_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
  3547     assert(is_ptr_aligned(req_addr, alignment), "Must be divisible by given alignment");
  3547     assert(is_aligned(req_addr, alignment), "Must be divisible by given alignment");
  3548     return shmat_at_address(shmid, req_addr);
  3549   }
  3550 
  3551   // Since shmid has been setup with SHM_HUGETLB, shmat will automatically
  3552   // return large page size aligned memory addresses when req_addr == NULL.
  3553   // However, if the alignment is larger than the large page size, we have
  3554   // to manually ensure that the memory returned is 'alignment' aligned.
  3555   if (alignment > os::large_page_size()) {
  3556     assert(is_size_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
  3556     assert(is_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
  3557     return shmat_with_alignment(shmid, bytes, alignment);
  3558   } else {
  3559     return shmat_at_address(shmid, NULL);
  3560   }
  3561 }
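shmat_large_pages relies on a SHM_HUGETLB segment already coming back large-page aligned and only falls back to shmat_with_alignment when the requested alignment exceeds the large page size. An end-to-end sketch of the SysV shared-memory path these helpers sit on (illustrative; the real code adds error reporting and the alignment handling shown above):

    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <stddef.h>

    // Sketch: create a huge-page-backed SysV segment, attach it, and mark it for
    // removal so it disappears once detached. SHM_HUGETLB is Linux-specific,
    // needs configured hugetlb pages, and 'bytes' must be a multiple of the
    // huge page size.
    static char* shm_large_page_alloc(size_t bytes) {
      int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB | IPC_CREAT | 0600);
      if (shmid == -1) {
        return NULL;
      }
      char* addr = (char*)shmat(shmid, NULL, 0);
      // Remove the id now; the mapping stays alive until shmdt()/process exit.
      shmctl(shmid, IPC_RMID, NULL);
      return (addr == (char*)-1) ? NULL : addr;
    }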
  3563 char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
  3564                                             char* req_addr, bool exec) {
  3565   // "exec" is passed in but not used.  Creating the shared image for
  3566   // the code cache doesn't have an SHM_X executable permission to check.
  3567   assert(UseLargePages && UseSHM, "only for SHM large pages");
  3568   assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
  3568   assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
  3569   assert(is_ptr_aligned(req_addr, alignment), "Unaligned address");
  3569   assert(is_aligned(req_addr, alignment), "Unaligned address");
  3570 
  3571   if (!is_size_aligned(bytes, os::large_page_size())) {
  3571   if (!is_aligned(bytes, os::large_page_size())) {
  3572     return NULL; // Fallback to small pages.
  3573   }
  3574 
  3575   // Create a large shared memory region to attach to based on size.
  3576   // Currently, size is the total size of the heap.
  3625 
  3626 char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
  3627                                                         char* req_addr,
  3628                                                         bool exec) {
  3629   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
  3630   assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
  3630   assert(is_aligned(bytes, os::large_page_size()), "Unaligned size");
  3631   assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
  3631   assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
  3632 
  3633   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  3634   char* addr = (char*)::mmap(req_addr, bytes, prot,
  3635                              MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
  3636                              -1, 0);
  3638   if (addr == MAP_FAILED) {
  3639     warn_on_large_pages_failure(req_addr, bytes, errno);
  3640     return NULL;
  3641   }
  3642 
  3643   assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
  3643   assert(is_aligned(addr, os::large_page_size()), "Must be");
  3644 
  3645   return addr;
  3646 }
  3647 
  3648 // Reserve memory using mmap(MAP_HUGETLB).
  3657                                                          char* req_addr,
  3658                                                          bool exec) {
  3659   size_t large_page_size = os::large_page_size();
  3660   assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
  3661 
  3662   assert(is_ptr_aligned(req_addr, alignment), "Must be");
  3662   assert(is_aligned(req_addr, alignment), "Must be");
  3663   assert(is_size_aligned(bytes, alignment), "Must be");
  3663   assert(is_aligned(bytes, alignment), "Must be");
  3664 
  3665   // First reserve - but not commit - the address range in small pages.
  3666   char* const start = anon_mmap_aligned(bytes, alignment, req_addr);
  3667 
  3668   if (start == NULL) {
  3669     return NULL;
  3670   }
  3671 
  3672   assert(is_ptr_aligned(start, alignment), "Must be");
  3672   assert(is_aligned(start, alignment), "Must be");
  3673 
  3674   char* end = start + bytes;
  3675 
  3676   // Find the regions of the allocated chunk that can be promoted to large pages.
  3677   char* lp_start = align_ptr_up(start, large_page_size);
  3677   char* lp_start = align_up(start, large_page_size);
  3678   char* lp_end   = align_ptr_down(end, large_page_size);
  3678   char* lp_end   = align_down(end, large_page_size);
  3679 
  3680   size_t lp_bytes = lp_end - lp_start;
  3681 
  3682   assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
  3682   assert(is_aligned(lp_bytes, large_page_size), "Must be");
  3683 
  3684   if (lp_bytes == 0) {
  3685     // The mapped region doesn't even span the start and the end of a large page.
  3686     // Fall back to allocate a non-special area.
  3687     ::munmap(start, end - start);
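The mixed path reserves the whole range with small pages first and then computes the interior that can be promoted: lp_start is start rounded up to a large-page boundary, lp_end is end rounded down, so lp_bytes can be zero when the range never spans a full large page. A tiny worked example of that arithmetic with made-up numbers:

    #include <stdio.h>

    // Sketch: with a 2 MiB large page, a range starting 0x1000 past a boundary
    // and 6 MiB long leaves a 4 MiB interior that can be promoted to huge pages.
    int main() {
      const unsigned long long lp    = 2ull * 1024 * 1024;          // large page size
      const unsigned long long start = 0x200000000ull + 0x1000;     // not large-page aligned
      const unsigned long long end   = start + 6ull * 1024 * 1024;  // 6 MiB reservation

      const unsigned long long lp_start = (start + lp - 1) & ~(lp - 1);  // align_up
      const unsigned long long lp_end   = end & ~(lp - 1);               // align_down

      printf("head  %llu bytes (small pages)\n", lp_start - start);
      printf("large %llu bytes (huge pages)\n",  lp_end - lp_start);
      printf("tail  %llu bytes (small pages)\n", end - lp_end);
      return 0;
    }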
  3738 char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
  3739                                                    size_t alignment,
  3740                                                    char* req_addr,
  3741                                                    bool exec) {
  3742   assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
  3743   assert(is_ptr_aligned(req_addr, alignment), "Must be");
  3743   assert(is_aligned(req_addr, alignment), "Must be");
  3744   assert(is_size_aligned(alignment, os::vm_allocation_granularity()), "Must be");
  3744   assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
  3745   assert(is_power_of_2(os::large_page_size()), "Must be");
  3746   assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
  3747 
  3748   if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
  3748   if (is_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
  3749     return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
  3750   } else {
  3751     return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
  3752   }
  3753 }
  5965     test_log("%s, req_addr NULL:", __FUNCTION__);
  5966     test_log("size            align           result");
  5967 
  5968     for (int i = 0; i < num_sizes; i++) {
  5969       const size_t size = sizes[i];
  5970       for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
  5970       for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
  5971         char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
  5972         test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " ->  " PTR_FORMAT " %s",
  5973                  size, alignment, p2i(p), (p != NULL ? "" : "(failed)"));
  5974         if (p != NULL) {
  5975           assert(is_ptr_aligned(p, alignment), "must be");
  5975           assert(is_aligned(p, alignment), "must be");
  5976           small_page_write(p, size);
  5977           os::Linux::release_memory_special_huge_tlbfs(p, size);
  5978         }
  5979       }
  5980     }
  5983     test_log("%s, req_addr non-NULL:", __FUNCTION__);
  5984     test_log("size            align           req_addr         result");
  5985 
  5986     for (int i = 0; i < num_sizes; i++) {
  5987       const size_t size = sizes[i];
  5988       for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
  5988       for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
  5989         char* const req_addr = align_ptr_up(mapping1, alignment);
  5989         char* const req_addr = align_up(mapping1, alignment);
  5990         char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
  5991         test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " ->  " PTR_FORMAT " %s",
  5992                  size, alignment, p2i(req_addr), p2i(p),
  5993                  ((p != NULL ? (p == req_addr ? "(exact match)" : "") : "(failed)")));
  5994         if (p != NULL) {
  6003     test_log("%s, req_addr non-NULL with preexisting mapping:", __FUNCTION__);
  6004     test_log("size            align           req_addr         result");
  6005 
  6006     for (int i = 0; i < num_sizes; i++) {
  6007       const size_t size = sizes[i];
  6008       for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
  6008       for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
  6009         char* const req_addr = align_ptr_up(mapping2, alignment);
  6009         char* const req_addr = align_up(mapping2, alignment);
  6010         char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
  6011         test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " ->  " PTR_FORMAT " %s",
  6012                  size, alignment, p2i(req_addr), p2i(p), ((p != NULL ? "" : "(failed)")));
  6013         // as the area around req_addr contains already existing mappings, the API should always
  6014         // return NULL (as per contract, it cannot return another address)
  6037     test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
  6038 
  6039     char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
  6040 
  6041     if (addr != NULL) {
  6042       assert(is_ptr_aligned(addr, alignment), "Check");
  6042       assert(is_aligned(addr, alignment), "Check");
  6043       assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
  6043       assert(is_aligned(addr, os::large_page_size()), "Check");
  6044 
  6045       small_page_write(addr, size);
  6046 
  6047       os::Linux::release_memory_special_shm(addr, size);
  6048     }
  6051   static void test_reserve_memory_special_shm() {
  6052     size_t lp = os::large_page_size();
  6053     size_t ag = os::vm_allocation_granularity();
  6054 
  6055     for (size_t size = ag; size < lp * 3; size += ag) {
  6056       for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
  6056       for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
  6057         test_reserve_memory_special_shm(size, alignment);
  6058       }
  6059     }
  6060   }
  6061 