--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -2325,7 +2325,9 @@
}
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+ fatal("This code is not used or maintained.");
+
// "exec" is passed in but not used. Creating the shared image for
// the code cache doesn't have an SHM_X executable permission to check.
assert(UseLargePages && UseSHM, "only for SHM large pages");
@@ -4752,3 +4754,8 @@
return n;
}
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+ // No tests available for this platform
+}
+#endif
--- a/hotspot/src/os/linux/vm/globals_linux.hpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/os/linux/vm/globals_linux.hpp Mon Aug 26 07:01:23 2013 -0700
@@ -40,6 +40,9 @@
product(bool, UseHugeTLBFS, false, \
"Use MAP_HUGETLB for large pages") \
\
+ product(bool, UseTransparentHugePages, false, \
+ "Use MADV_HUGEPAGE for large pages") \
+ \
product(bool, LoadExecStackDllInVMThread, true, \
"Load DLLs with executable-stack attribute in the VM Thread") \
\
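
The new flag asks the kernel to back ordinary anonymous mappings with huge pages through madvise(MADV_HUGEPAGE), instead of reserving them up front from the hugetlbfs pool as UseHugeTLBFS does. A minimal standalone sketch of that request, assuming a Linux kernel built with transparent huge page support (the 2 MB size is illustrative, not probed):

    // Standalone sketch, not JVM code: ask for transparent huge pages on an
    // anonymous mapping, the same request os::pd_realign_memory() issues with
    // MADV_HUGEPAGE. Assumes a Linux kernel with THP compiled in.
    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t len = 4 * 2 * 1024 * 1024;  // four (assumed) 2 MB huge pages
      void* p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) { perror("mmap"); return 1; }
      // May fail with EINVAL on kernels without THP; the patch treats such a
      // failure as "use small pages", never as a hard error.
      if (madvise(p, len, MADV_HUGEPAGE) != 0) {
        perror("madvise(MADV_HUGEPAGE)");
      }
      munmap(p, len);
      return 0;
    }
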
--- a/hotspot/src/os/linux/vm/os_linux.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -2720,36 +2720,7 @@
int os::Linux::commit_memory_impl(char* addr, size_t size,
size_t alignment_hint, bool exec) {
- int err;
- if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
- int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
- uintptr_t res =
- (uintptr_t) ::mmap(addr, size, prot,
- MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
- -1, 0);
- if (res != (uintptr_t) MAP_FAILED) {
- if (UseNUMAInterleaving) {
- numa_make_global(addr, size);
- }
- return 0;
- }
-
- err = errno; // save errno from mmap() call above
-
- if (!recoverable_mmap_error(err)) {
- // However, it is not clear that this loss of our reserved mapping
- // happens with large pages on Linux or that we cannot recover
- // from the loss. For now, we just issue a warning and we don't
- // call vm_exit_out_of_memory(). This issue is being tracked by
- // JBS-8007074.
- warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
-// vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
-// "committing reserved memory.");
- }
- // Fall through and try to use small pages
- }
-
- err = os::Linux::commit_memory_impl(addr, size, exec);
+ int err = os::Linux::commit_memory_impl(addr, size, exec);
if (err == 0) {
realign_memory(addr, size, alignment_hint);
}
@@ -2774,7 +2745,7 @@
}
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
- if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
+ if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
// We don't check the return value: madvise(MADV_HUGEPAGE) may not
// be supported or the memory may already be backed by huge pages.
::madvise(addr, bytes, MADV_HUGEPAGE);
@@ -2787,7 +2758,7 @@
// uncommitted at all. We don't do anything in this case to avoid creating a segment with
// small pages on top of the SHM segment. This method always works for small pages, so we
// allow that in any case.
- if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
+ if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
commit_memory(addr, bytes, alignment_hint, !ExecMem);
}
}
@@ -3157,11 +3128,31 @@
return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
}
+bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
+ bool result = false;
+ void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE,
+ -1, 0);
+ if (p != MAP_FAILED) {
+ void *aligned_p = align_ptr_up(p, page_size);
+
+ result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
+
+ munmap(p, page_size * 2);
+ }
+
+ if (warn && !result) {
+ warning("TransparentHugePages is not supported by the operating system.");
+ }
+
+ return result;
+}
+
bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
bool result = false;
- void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
- MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
- -1, 0);
+ void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
+ -1, 0);
if (p != MAP_FAILED) {
// We don't know if this really is a huge page or not.
@@ -3182,12 +3173,10 @@
}
fclose(fp);
}
- munmap (p, page_size);
- if (result)
- return true;
- }
-
- if (warn) {
+ munmap(p, page_size);
+ }
+
+ if (warn && !result) {
warning("HugeTLBFS is not supported by the operating system.");
}
@@ -3235,82 +3224,114 @@
static size_t _large_page_size = 0;
-void os::large_page_init() {
- if (!UseLargePages) {
- UseHugeTLBFS = false;
- UseSHM = false;
- return;
- }
-
- if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
- // If UseLargePages is specified on the command line try both methods,
- // if it's default, then try only HugeTLBFS.
- if (FLAG_IS_DEFAULT(UseLargePages)) {
- UseHugeTLBFS = true;
- } else {
- UseHugeTLBFS = UseSHM = true;
- }
- }
-
- if (LargePageSizeInBytes) {
- _large_page_size = LargePageSizeInBytes;
- } else {
- // large_page_size on Linux is used to round up heap size. x86 uses either
- // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
- // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
- // page as large as 256M.
- //
- // Here we try to figure out page size by parsing /proc/meminfo and looking
- // for a line with the following format:
- // Hugepagesize: 2048 kB
- //
- // If we can't determine the value (e.g. /proc is not mounted, or the text
- // format has been changed), we'll use the largest page size supported by
- // the processor.
+size_t os::Linux::find_large_page_size() {
+ size_t large_page_size = 0;
+
+ // large_page_size on Linux is used to round up heap size. x86 uses either
+ // 2M or 4M pages, depending on whether PAE (Physical Address Extensions)
+ // mode is enabled. AMD64/EM64T uses 2M pages in 64-bit mode. IA64 can use
+ // pages as large as 256M.
+ //
+ // Here we try to figure out page size by parsing /proc/meminfo and looking
+ // for a line with the following format:
+ // Hugepagesize: 2048 kB
+ //
+ // If we can't determine the value (e.g. /proc is not mounted, or the text
+ // format has been changed), we'll use the largest page size supported by
+ // the processor.
#ifndef ZERO
- _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
- ARM_ONLY(2 * M) PPC_ONLY(4 * M);
+ large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
+ ARM_ONLY(2 * M) PPC_ONLY(4 * M);
#endif // ZERO
- FILE *fp = fopen("/proc/meminfo", "r");
- if (fp) {
- while (!feof(fp)) {
- int x = 0;
- char buf[16];
- if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
- if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
- _large_page_size = x * K;
- break;
- }
- } else {
- // skip to next line
- for (;;) {
- int ch = fgetc(fp);
- if (ch == EOF || ch == (int)'\n') break;
- }
+ FILE *fp = fopen("/proc/meminfo", "r");
+ if (fp) {
+ while (!feof(fp)) {
+ int x = 0;
+ char buf[16];
+ if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
+ if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
+ large_page_size = x * K;
+ break;
+ }
+ } else {
+ // skip to next line
+ for (;;) {
+ int ch = fgetc(fp);
+ if (ch == EOF || ch == (int)'\n') break;
}
}
- fclose(fp);
}
- }
-
- // print a warning if any large page related flag is specified on command line
- bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
-
+ fclose(fp);
+ }
+
+ if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
+ warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
+ SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
+ proper_unit_for_byte_size(large_page_size));
+ }
+
+ return large_page_size;
+}
+
+size_t os::Linux::setup_large_page_size() {
+ _large_page_size = Linux::find_large_page_size();
const size_t default_page_size = (size_t)Linux::page_size();
if (_large_page_size > default_page_size) {
_page_sizes[0] = _large_page_size;
_page_sizes[1] = default_page_size;
_page_sizes[2] = 0;
}
- UseHugeTLBFS = UseHugeTLBFS &&
- Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
-
- if (UseHugeTLBFS)
+
+ return _large_page_size;
+}
+
+bool os::Linux::setup_large_page_type(size_t page_size) {
+ if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
+ FLAG_IS_DEFAULT(UseSHM) &&
+ FLAG_IS_DEFAULT(UseTransparentHugePages)) {
+ // If UseLargePages is specified on the command line, try all methods;
+ // if it is at its default, try only UseTransparentHugePages.
+ if (FLAG_IS_DEFAULT(UseLargePages)) {
+ UseTransparentHugePages = true;
+ } else {
+ UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
+ }
+ }
+
+ if (UseTransparentHugePages) {
+ bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
+ if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
+ UseHugeTLBFS = false;
+ UseSHM = false;
+ return true;
+ }
+ UseTransparentHugePages = false;
+ }
+
+ if (UseHugeTLBFS) {
+ bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+ if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
+ UseSHM = false;
+ return true;
+ }
+ UseHugeTLBFS = false;
+ }
+
+ return UseSHM;
+}
+
+void os::large_page_init() {
+ if (!UseLargePages) {
+ UseHugeTLBFS = false;
+ UseTransparentHugePages = false;
UseSHM = false;
-
- UseLargePages = UseHugeTLBFS || UseSHM;
+ return;
+ }
+
+ size_t large_page_size = Linux::setup_large_page_size();
+ UseLargePages = Linux::setup_large_page_type(large_page_size);
set_coredump_filter();
}
@@ -3319,16 +3340,22 @@
#define SHM_HUGETLB 04000
#endif
-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
// "exec" is passed in but not used. Creating the shared image for
// the code cache doesn't have an SHM_X executable permission to check.
assert(UseLargePages && UseSHM, "only for SHM large pages");
+ assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+ if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+ return NULL; // Fall back to small pages.
+ }
key_t key = IPC_PRIVATE;
char *addr;
bool warn_on_failure = UseLargePages &&
(!FLAG_IS_DEFAULT(UseLargePages) ||
+ !FLAG_IS_DEFAULT(UseSHM) ||
!FLAG_IS_DEFAULT(LargePageSizeInBytes)
);
char msg[128];
@@ -3376,42 +3403,219 @@
return NULL;
}
- if ((addr != NULL) && UseNUMAInterleaving) {
- numa_make_global(addr, bytes);
- }
-
- // The memory is committed
- MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+ return addr;
+}
+
+static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
+ assert(error == ENOMEM, "Only expect to fail if no memory is available");
+
+ bool warn_on_failure = UseLargePages &&
+ (!FLAG_IS_DEFAULT(UseLargePages) ||
+ !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
+ !FLAG_IS_DEFAULT(LargePageSizeInBytes));
+
+ if (warn_on_failure) {
+ char msg[128];
+ jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
+ PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+ warning(msg);
+ }
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
+ assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+ assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
+ assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+ int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+ char* addr = (char*)::mmap(req_addr, bytes, prot,
+ MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
+ -1, 0);
+
+ if (addr == MAP_FAILED) {
+ warn_on_large_pages_failure(req_addr, bytes, errno);
+ return NULL;
+ }
+
+ assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
return addr;
}
+char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+ size_t large_page_size = os::large_page_size();
+
+ assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
+
+ // Allocate small pages.
+
+ char* start;
+ if (req_addr != NULL) {
+ assert(is_ptr_aligned(req_addr, alignment), "Must be");
+ assert(is_size_aligned(bytes, alignment), "Must be");
+ start = os::reserve_memory(bytes, req_addr);
+ assert(start == NULL || start == req_addr, "Must be");
+ } else {
+ start = os::reserve_memory_aligned(bytes, alignment);
+ }
+
+ if (start == NULL) {
+ return NULL;
+ }
+
+ assert(is_ptr_aligned(start, alignment), "Must be");
+
+ // os::reserve_memory_special will record this memory area.
+ // Need to release it here to prevent overlapping reservations.
+ MemTracker::record_virtual_memory_release((address)start, bytes);
+
+ char* end = start + bytes;
+
+ // Find the regions of the allocated chunk that can be promoted to large pages.
+ char* lp_start = (char*)align_ptr_up(start, large_page_size);
+ char* lp_end = (char*)align_ptr_down(end, large_page_size);
+
+ size_t lp_bytes = lp_end - lp_start;
+
+ assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
+
+ if (lp_bytes == 0) {
+ // The mapped region doesn't even span the start and the end of a large page.
+ // Fall back to allocate a non-special area.
+ ::munmap(start, end - start);
+ return NULL;
+ }
+
+ int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+
+ void* result;
+
+ if (start != lp_start) {
+ result = ::mmap(start, lp_start - start, prot,
+ MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+ -1, 0);
+ if (result == MAP_FAILED) {
+ ::munmap(lp_start, end - lp_start);
+ return NULL;
+ }
+ }
+
+ result = ::mmap(lp_start, lp_bytes, prot,
+ MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
+ -1, 0);
+ if (result == MAP_FAILED) {
+ warn_on_large_pages_failure(req_addr, bytes, errno);
+ // If the mmap above fails, the large pages region will be unmapped and we
+ // have regions before and after with small pages. Release these regions.
+ //
+ // | mapped | unmapped | mapped |
+ // ^        ^          ^        ^
+ // start    lp_start   lp_end   end
+ //
+ ::munmap(start, lp_start - start);
+ ::munmap(lp_end, end - lp_end);
+ return NULL;
+ }
+
+ if (lp_end != end) {
+ result = ::mmap(lp_end, end - lp_end, prot,
+ MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+ -1, 0);
+ if (result == MAP_FAILED) {
+ ::munmap(start, lp_end - start);
+ return NULL;
+ }
+ }
+
+ return start;
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+ assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+ assert(is_ptr_aligned(req_addr, alignment), "Must be");
+ assert(is_power_of_2(alignment), "Must be");
+ assert(is_power_of_2(os::large_page_size()), "Must be");
+ assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
+
+ if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
+ return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
+ } else {
+ return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
+ }
+}
+
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+ assert(UseLargePages, "only for large pages");
+
+ char* addr;
+ if (UseSHM) {
+ addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
+ } else {
+ assert(UseHugeTLBFS, "must be");
+ addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
+ }
+
+ if (addr != NULL) {
+ if (UseNUMAInterleaving) {
+ numa_make_global(addr, bytes);
+ }
+
+ // The memory is committed
+ MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+ }
+
+ return addr;
+}
+
+bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
+ // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
+ return shmdt(base) == 0;
+}
+
+bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
+ return pd_release_memory(base, bytes);
+}
+
bool os::release_memory_special(char* base, size_t bytes) {
+ assert(UseLargePages, "only for large pages");
+
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
- // detaching the SHM segment will also delete it, see reserve_memory_special()
- int rslt = shmdt(base);
- if (rslt == 0) {
+
+ bool res;
+ if (UseSHM) {
+ res = os::Linux::release_memory_special_shm(base, bytes);
+ } else {
+ assert(UseHugeTLBFS, "must be");
+ res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
+ }
+
+ if (res) {
tkr.record((address)base, bytes);
- return true;
} else {
tkr.discard();
- return false;
- }
+ }
+
+ return res;
}
size_t os::large_page_size() {
return _large_page_size;
}
-// HugeTLBFS allows application to commit large page memory on demand;
-// with SysV SHM the entire memory region must be allocated as shared
+// With SysV SHM the entire memory region must be allocated as shared
// memory.
+// HugeTLBFS allows an application to commit large page memory on demand.
+// However, when committing memory with HugeTLBFS fails, the region
+// that was supposed to be committed will lose the old reservation
+// and allow other threads to steal that memory region. Because of this
+// behavior we can't commit HugeTLBFS memory.
bool os::can_commit_large_page_memory() {
- return UseHugeTLBFS;
+ return UseTransparentHugePages;
}
bool os::can_execute_large_page_memory() {
- return UseHugeTLBFS;
+ return UseTransparentHugePages || UseHugeTLBFS;
}
// Reserve memory at an arbitrary address, only if that area is
@@ -4563,21 +4767,23 @@
UseNUMA = false;
}
}
- // With SHM large pages we cannot uncommit a page, so there's not way
+ // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
// we can make the adaptive lgrp chunk resizing work. If the user specified
- // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
+ // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
// disable adaptive resizing.
- if (UseNUMA && UseLargePages && UseSHM) {
- if (!FLAG_IS_DEFAULT(UseNUMA)) {
- if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
+ if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+ if (FLAG_IS_DEFAULT(UseNUMA)) {
+ UseNUMA = false;
+ } else {
+ if (FLAG_IS_DEFAULT(UseLargePages) &&
+ FLAG_IS_DEFAULT(UseSHM) &&
+ FLAG_IS_DEFAULT(UseHugeTLBFS)) {
UseLargePages = false;
} else {
- warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
+ warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
UseAdaptiveSizePolicy = false;
UseAdaptiveNUMAChunkSizing = false;
}
- } else {
- UseNUMA = false;
}
}
if (!UseNUMA && ForceNUMA) {
@@ -5848,3 +6054,149 @@
}
#endif // JAVASE_EMBEDDED
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+ do {\
+ if (VerboseInternalVMTests) { \
+ tty->print_cr(__VA_ARGS__); \
+ tty->flush(); \
+ }\
+ } while (false)
+
+class TestReserveMemorySpecial : AllStatic {
+ public:
+ static void small_page_write(void* addr, size_t size) {
+ size_t page_size = os::vm_page_size();
+
+ char* end = (char*)addr + size;
+ for (char* p = (char*)addr; p < end; p += page_size) {
+ *p = 1;
+ }
+ }
+
+ static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
+ if (!UseHugeTLBFS) {
+ return;
+ }
+
+ test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);
+
+ char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
+
+ if (addr != NULL) {
+ small_page_write(addr, size);
+
+ os::Linux::release_memory_special_huge_tlbfs(addr, size);
+ }
+ }
+
+ static void test_reserve_memory_special_huge_tlbfs_only() {
+ if (!UseHugeTLBFS) {
+ return;
+ }
+
+ size_t lp = os::large_page_size();
+
+ for (size_t size = lp; size <= lp * 10; size += lp) {
+ test_reserve_memory_special_huge_tlbfs_only(size);
+ }
+ }
+
+ static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
+ if (!UseHugeTLBFS) {
+ return;
+ }
+
+ test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
+ size, alignment);
+
+ assert(size >= os::large_page_size(), "Incorrect input to test");
+
+ char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
+
+ if (addr != NULL) {
+ small_page_write(addr, size);
+
+ os::Linux::release_memory_special_huge_tlbfs(addr, size);
+ }
+ }
+
+ static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
+ size_t lp = os::large_page_size();
+ size_t ag = os::vm_allocation_granularity();
+
+ for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+ test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
+ }
+ }
+
+ static void test_reserve_memory_special_huge_tlbfs_mixed() {
+ size_t lp = os::large_page_size();
+ size_t ag = os::vm_allocation_granularity();
+
+ test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
+ test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
+ test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
+ test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
+ test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
+ test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
+ test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
+ test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
+ test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
+ }
+
+ static void test_reserve_memory_special_huge_tlbfs() {
+ if (!UseHugeTLBFS) {
+ return;
+ }
+
+ test_reserve_memory_special_huge_tlbfs_only();
+ test_reserve_memory_special_huge_tlbfs_mixed();
+ }
+
+ static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
+ if (!UseSHM) {
+ return;
+ }
+
+ test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
+
+ char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
+
+ if (addr != NULL) {
+ assert(is_ptr_aligned(addr, alignment), "Check");
+ assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
+
+ small_page_write(addr, size);
+
+ os::Linux::release_memory_special_shm(addr, size);
+ }
+ }
+
+ static void test_reserve_memory_special_shm() {
+ size_t lp = os::large_page_size();
+ size_t ag = os::vm_allocation_granularity();
+
+ for (size_t size = ag; size < lp * 3; size += ag) {
+ for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+ test_reserve_memory_special_shm(size, alignment);
+ }
+ }
+ }
+
+ static void test() {
+ test_reserve_memory_special_huge_tlbfs();
+ test_reserve_memory_special_shm();
+ }
+};
+
+void TestReserveMemorySpecial_test() {
+ TestReserveMemorySpecial::test();
+}
+
+#endif
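
For sizes or alignments that do not fit whole large pages, reserve_memory_special_huge_tlbfs_mixed() above first reserves the whole range with small pages, then remaps only the large-page-aligned interior with MAP_FIXED|MAP_HUGETLB, leaving small-page head and tail mappings. A standalone sketch of the boundary arithmetic, using a hypothetical 64-bit address and an assumed 2 MB large page size:

    // Standalone sketch, not JVM code: the head/interior/tail split computed
    // by reserve_memory_special_huge_tlbfs_mixed(). Addresses are hypothetical
    // and lp stands in for os::large_page_size().
    #include <cstdio>

    int main() {
      const unsigned long long lp = 2ULL * 1024 * 1024;      // assumed 2 MB
      unsigned long long start    = 0x7f0000100000ULL;       // hypothetical mmap result
      unsigned long long end      = start + 5 * 1024 * 1024; // a 5 MB reservation
      unsigned long long lp_start = (start + lp - 1) & ~(lp - 1); // align_ptr_up
      unsigned long long lp_end   = end & ~(lp - 1);              // align_ptr_down
      // [start, lp_start) and [lp_end, end) stay small pages; only
      // [lp_start, lp_end) is remapped with MAP_FIXED|MAP_HUGETLB.
      printf("head:  %llu KB\n", (lp_start - start) / 1024);
      printf("large: %llu KB\n", (lp_end - lp_start) / 1024);
      printf("tail:  %llu KB\n", (end - lp_end) / 1024);
      return 0;
    }
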
--- a/hotspot/src/os/linux/vm/os_linux.hpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.hpp Mon Aug 26 07:01:23 2013 -0700
@@ -32,6 +32,7 @@
class Linux {
friend class os;
+ friend class TestReserveMemorySpecial;
// For signal-chaining
#define MAXSIGNUM 32
@@ -92,8 +93,21 @@
static void rebuild_cpu_to_node_map();
static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }
+ static size_t find_large_page_size();
+ static size_t setup_large_page_size();
+
+ static bool setup_large_page_type(size_t page_size);
+ static bool transparent_huge_pages_sanity_check(bool warn, size_t page_size);
static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
+ static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
+ static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
+ static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
+ static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
+
+ static bool release_memory_special_shm(char* base, size_t bytes);
+ static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
+
static void print_full_memory_info(outputStream* st);
static void print_distro_info(outputStream* st);
static void print_libversion_info(outputStream* st);
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -3385,7 +3385,7 @@
return true;
}
-char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
fatal("os::reserve_memory_special should not be called on Solaris.");
return NULL;
}
@@ -6601,3 +6601,9 @@
return strlen(buffer);
}
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+ // No tests available for this platform
+}
+#endif
--- a/hotspot/src/os/windows/vm/os_windows.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -3156,7 +3156,12 @@
return true;
}
-char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
+ assert(UseLargePages, "only for large pages");
+
+ if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+ return NULL; // Fall back to small pages.
+ }
const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
@@ -5638,3 +5643,9 @@
}
#endif
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+ // No tests available for this platform
+}
+#endif
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -2006,10 +2006,12 @@
size_t init_byte_size = collector_policy()->initial_heap_byte_size();
size_t max_byte_size = collector_policy()->max_heap_byte_size();
+ size_t heap_alignment = collector_policy()->max_alignment();
// Ensure that the sizes are properly aligned.
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
+ Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
_cg1r = new ConcurrentG1Refine(this);
@@ -2026,12 +2028,8 @@
// If this happens then we could end up using a non-optimal
// compressed oops mode.
- // Since max_byte_size is aligned to the size of a heap region (checked
- // above).
- Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
-
ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
- HeapRegion::GrainBytes);
+ heap_alignment);
// It is important to do this in a way such that concurrent readers can't
// temporarily think something is in the heap. (I've actually seen this
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -313,7 +313,8 @@
void G1CollectorPolicy::initialize_flags() {
set_min_alignment(HeapRegion::GrainBytes);
size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
- set_max_alignment(MAX2(card_table_alignment, min_alignment()));
+ size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+ set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
if (SurvivorRatio < 1) {
vm_exit_during_initialization("Invalid survivor ratio specified");
}
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -193,6 +193,8 @@
alignment = lcm(os::large_page_size(), alignment);
}
+ assert(alignment >= min_alignment(), "Must be");
+
return alignment;
}
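
The assert records an invariant the surrounding code already depends on: lcm(os::large_page_size(), alignment) can only keep or grow the alignment, and since both operands are powers of two here, the lcm is simply the larger of the two. A quick standalone check with assumed sizes:

    // Standalone check, not JVM code: with the power-of-two values involved
    // here, lcm(large_page_size, alignment) reduces to the larger operand.
    #include <cassert>
    #include <cstddef>

    static size_t gcd(size_t a, size_t b) { return b == 0 ? a : gcd(b, a % b); }
    static size_t lcm(size_t a, size_t b) { return a / gcd(a, b) * b; }

    int main() {
      const size_t large_page = 2 * 1024 * 1024;  // assumed 2 MB large pages
      assert(lcm(large_page, 512 * 1024) == large_page);           // grows to 2 MB
      assert(lcm(large_page, 8 * 1024 * 1024) == 8 * 1024 * 1024); // already bigger
      return 0;
    }
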
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -95,13 +95,13 @@
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
// The heap must be at least as aligned as generations.
- size_t alignment = Generation::GenGrain;
+ size_t gen_alignment = Generation::GenGrain;
_gen_specs = gen_policy()->generations();
// Make sure the sizes are all aligned.
for (i = 0; i < _n_gens; i++) {
- _gen_specs[i]->align(alignment);
+ _gen_specs[i]->align(gen_alignment);
}
// Allocate space for the heap.
@@ -109,9 +109,11 @@
char* heap_address;
size_t total_reserved = 0;
int n_covered_regions = 0;
- ReservedSpace heap_rs(0);
+ ReservedSpace heap_rs;
- heap_address = allocate(alignment, &total_reserved,
+ size_t heap_alignment = collector_policy()->max_alignment();
+
+ heap_address = allocate(heap_alignment, &total_reserved,
&n_covered_regions, &heap_rs);
if (!heap_rs.is_reserved()) {
@@ -168,6 +170,8 @@
const size_t pageSize = UseLargePages ?
os::large_page_size() : os::vm_page_size();
+ assert(alignment % pageSize == 0, "Must be");
+
for (int i = 0; i < _n_gens; i++) {
total_reserved += _gen_specs[i]->max_size();
if (total_reserved < _gen_specs[i]->max_size()) {
@@ -175,24 +179,17 @@
}
n_covered_regions += _gen_specs[i]->n_covered_regions();
}
- assert(total_reserved % pageSize == 0,
- err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
- SIZE_FORMAT, total_reserved, pageSize));
+ assert(total_reserved % alignment == 0,
+ err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
+ SIZE_FORMAT, total_reserved, alignment));
// Needed until the cardtable is fixed to have the right number
// of covered regions.
n_covered_regions += 2;
- if (UseLargePages) {
- assert(total_reserved != 0, "total_reserved cannot be 0");
- total_reserved = round_to(total_reserved, os::large_page_size());
- if (total_reserved < os::large_page_size()) {
- vm_exit_during_initialization(overflow_msg);
- }
- }
+ *_total_reserved = total_reserved;
+ *_n_covered_regions = n_covered_regions;
- *_total_reserved = total_reserved;
- *_n_covered_regions = n_covered_regions;
*heap_rs = Universe::reserve_heap(total_reserved, alignment);
return heap_rs->base();
}
--- a/hotspot/src/share/vm/memory/metaspace.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -345,7 +345,7 @@
};
// byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
+VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
// align up to vm allocation granularity
byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
--- a/hotspot/src/share/vm/memory/universe.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/memory/universe.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -681,17 +681,23 @@
// 32Gb
// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
-char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
+ assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
+ assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
+ assert(is_size_aligned(heap_size, alignment), "Must be");
+
+ uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
+
size_t base = 0;
#ifdef _LP64
if (UseCompressedOops) {
assert(mode == UnscaledNarrowOop ||
mode == ZeroBasedNarrowOop ||
mode == HeapBasedNarrowOop, "mode is invalid");
- const size_t total_size = heap_size + HeapBaseMinAddress;
+ const size_t total_size = heap_size + heap_base_min_address_aligned;
// Return specified base for the first request.
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
- base = HeapBaseMinAddress;
+ base = heap_base_min_address_aligned;
// If the total size is small enough to allow UnscaledNarrowOop then
// just use UnscaledNarrowOop.
@@ -742,6 +748,8 @@
}
}
#endif
+
+ assert(is_ptr_aligned((char*)base, alignment), "Must be");
return (char*)base; // also return NULL (don't care) for 32-bit VM
}
@@ -867,27 +875,33 @@
size_t total_reserved = align_size_up(heap_size, alignment);
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
- char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
- ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
+ bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
+ assert(!UseLargePages
+ || UseParallelOldGC
+ || use_large_pages, "Wrong alignment to use large pages");
+
+ char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
+
+ ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
if (UseCompressedOops) {
if (addr != NULL && !total_rs.is_reserved()) {
// Failed to reserve at specified address - the requested memory
// region is taken already, for example, by 'java' launcher.
// Try again to reserve heap higher.
- addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+ addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
ReservedHeapSpace total_rs0(total_reserved, alignment,
- UseLargePages, addr);
+ use_large_pages, addr);
if (addr != NULL && !total_rs0.is_reserved()) {
// Failed to reserve at specified address again - give up.
- addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+ addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
assert(addr == NULL, "");
ReservedHeapSpace total_rs1(total_reserved, alignment,
- UseLargePages, addr);
+ use_large_pages, addr);
total_rs = total_rs1;
} else {
total_rs = total_rs0;
--- a/hotspot/src/share/vm/memory/universe.hpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/memory/universe.hpp Mon Aug 26 07:01:23 2013 -0700
@@ -346,7 +346,7 @@
};
static NARROW_OOP_MODE narrow_oop_mode();
static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode);
- static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
+ static char* preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode);
static char* preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
static address narrow_oop_base() { return _narrow_oop._base; }
static bool is_narrow_oop_base(void* addr) { return (narrow_oop_base() == (address)addr); }
--- a/hotspot/src/share/vm/prims/jni.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/prims/jni.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -5045,9 +5045,15 @@
tty->print_cr("Running test: " #unit_test_function_call); \
unit_test_function_call
+// Forward declarations
+void TestReservedSpace_test();
+void TestReserveMemorySpecial_test();
+
void execute_internal_vm_tests() {
if (ExecuteInternalVMTests) {
tty->print_cr("Running internal VM tests");
+ run_unit_test(TestReservedSpace_test());
+ run_unit_test(TestReserveMemorySpecial_test());
run_unit_test(GlobalDefinitions::test_globals());
run_unit_test(GCTimerAllTest::all());
run_unit_test(arrayOopDesc::test_max_array_length());
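
run_unit_test() (shown partially above) stringizes the call for the log line and then executes it. A hypothetical stand-in for the pattern, with tty->print_cr() swapped for printf() and an invented test name:

    // Hypothetical stand-in, not JVM code: the run_unit_test() pattern above.
    // MyVMTest_test is an invented name mirroring TestReservedSpace_test().
    #include <cstdio>

    #define run_unit_test(unit_test_function_call)              \
      do {                                                      \
        printf("Running test: %s\n", #unit_test_function_call); \
        unit_test_function_call;                                \
      } while (0)

    void MyVMTest_test() {
      printf("MyVMTest_test passed\n");
    }

    int main() {
      run_unit_test(MyVMTest_test());
      return 0;
    }
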
--- a/hotspot/src/share/vm/runtime/globals.hpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp Mon Aug 26 07:01:23 2013 -0700
@@ -1933,6 +1933,9 @@
notproduct(bool, ExecuteInternalVMTests, false, \
"Enable execution of internal VM tests.") \
\
+ notproduct(bool, VerboseInternalVMTests, false, \
+ "Turn on logging for internal VM tests.") \
+ \
product_pd(bool, UseTLAB, "Use thread-local object allocation") \
\
product_pd(bool, ResizeTLAB, \
--- a/hotspot/src/share/vm/runtime/os.hpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/runtime/os.hpp Mon Aug 26 07:01:23 2013 -0700
@@ -328,8 +328,8 @@
static char* non_memory_address_word();
// reserve, commit and pin the entire memory region
- static char* reserve_memory_special(size_t size, char* addr = NULL,
- bool executable = false);
+ static char* reserve_memory_special(size_t size, size_t alignment,
+ char* addr, bool executable);
static bool release_memory_special(char* addr, size_t bytes);
static void large_page_init();
static size_t large_page_size();
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp Mon Aug 26 07:01:23 2013 -0700
@@ -42,8 +42,19 @@
// ReservedSpace
+
+// Dummy constructor
+ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
+ _alignment(0), _special(false), _executable(false) {
+}
+
ReservedSpace::ReservedSpace(size_t size) {
- initialize(size, 0, false, NULL, 0, false);
+ size_t page_size = os::page_size_for_region(size, size, 1);
+ bool large_pages = page_size != (size_t)os::vm_page_size();
+ // Don't force the alignment to be large page aligned,
+ // since that will waste memory.
+ size_t alignment = os::vm_allocation_granularity();
+ initialize(size, alignment, large_pages, NULL, 0, false);
}
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
@@ -129,16 +140,18 @@
if (special) {
- base = os::reserve_memory_special(size, requested_address, executable);
+ base = os::reserve_memory_special(size, alignment, requested_address, executable);
if (base != NULL) {
if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
// OS ignored requested address. Try different address.
return;
}
- // Check alignment constraints
+ // Check alignment constraints.
assert((uintptr_t) base % alignment == 0,
- "Large pages returned a non-aligned address");
+ err_msg("Large pages returned a non-aligned address, base: "
+ PTR_FORMAT " alignment: " PTR_FORMAT,
+ base, (void*)(uintptr_t)alignment));
_special = true;
} else {
// failed; try to reserve regular memory below
@@ -715,4 +728,188 @@
tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+ do {\
+ if (VerboseInternalVMTests) { \
+ tty->print_cr(__VA_ARGS__); \
+ tty->flush(); \
+ }\
+ } while (false)
+
+class TestReservedSpace : AllStatic {
+ public:
+ static void small_page_write(void* addr, size_t size) {
+ size_t page_size = os::vm_page_size();
+
+ char* end = (char*)addr + size;
+ for (char* p = (char*)addr; p < end; p += page_size) {
+ *p = 1;
+ }
+ }
+
+ static void release_memory_for_test(ReservedSpace rs) {
+ if (rs.special()) {
+ guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
+ } else {
+ guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
+ }
+ }
+
+ static void test_reserved_space1(size_t size, size_t alignment) {
+ test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
+
+ assert(is_size_aligned(size, alignment), "Incorrect input parameters");
+
+ ReservedSpace rs(size, // size
+ alignment, // alignment
+ UseLargePages, // large
+ NULL, // requested_address
+ 0); // noaccess_prefix
+
+ test_log(" rs.special() == %d", rs.special());
+
+ assert(rs.base() != NULL, "Must be");
+ assert(rs.size() == size, "Must be");
+
+ assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
+ assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
+
+ if (rs.special()) {
+ small_page_write(rs.base(), size);
+ }
+
+ release_memory_for_test(rs);
+ }
+
+ static void test_reserved_space2(size_t size) {
+ test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
+
+ assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+
+ ReservedSpace rs(size);
+
+ test_log(" rs.special() == %d", rs.special());
+
+ assert(rs.base() != NULL, "Must be");
+ assert(rs.size() == size, "Must be");
+
+ if (rs.special()) {
+ small_page_write(rs.base(), size);
+ }
+
+ release_memory_for_test(rs);
+ }
+
+ static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
+ test_log("test_reserved_space3(%p, %p, %d)",
+ (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
+
+ assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
+ assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
+
+ bool large = maybe_large && UseLargePages && size >= os::large_page_size();
+
+ ReservedSpace rs(size, alignment, large, false);
+
+ test_log(" rs.special() == %d", rs.special());
+
+ assert(rs.base() != NULL, "Must be");
+ assert(rs.size() == size, "Must be");
+
+ if (rs.special()) {
+ small_page_write(rs.base(), size);
+ }
+
+ release_memory_for_test(rs);
+ }
+
+
+ static void test_reserved_space1() {
+ size_t size = 2 * 1024 * 1024;
+ size_t ag = os::vm_allocation_granularity();
+
+ test_reserved_space1(size, ag);
+ test_reserved_space1(size * 2, ag);
+ test_reserved_space1(size * 10, ag);
+ }
+
+ static void test_reserved_space2() {
+ size_t size = 2 * 1024 * 1024;
+ size_t ag = os::vm_allocation_granularity();
+
+ test_reserved_space2(size * 1);
+ test_reserved_space2(size * 2);
+ test_reserved_space2(size * 10);
+ test_reserved_space2(ag);
+ test_reserved_space2(size - ag);
+ test_reserved_space2(size);
+ test_reserved_space2(size + ag);
+ test_reserved_space2(size * 2);
+ test_reserved_space2(size * 2 - ag);
+ test_reserved_space2(size * 2 + ag);
+ test_reserved_space2(size * 3);
+ test_reserved_space2(size * 3 - ag);
+ test_reserved_space2(size * 3 + ag);
+ test_reserved_space2(size * 10);
+ test_reserved_space2(size * 10 + size / 2);
+ }
+
+ static void test_reserved_space3() {
+ size_t ag = os::vm_allocation_granularity();
+
+ test_reserved_space3(ag, ag , false);
+ test_reserved_space3(ag * 2, ag , false);
+ test_reserved_space3(ag * 3, ag , false);
+ test_reserved_space3(ag * 2, ag * 2, false);
+ test_reserved_space3(ag * 4, ag * 2, false);
+ test_reserved_space3(ag * 8, ag * 2, false);
+ test_reserved_space3(ag * 4, ag * 4, false);
+ test_reserved_space3(ag * 8, ag * 4, false);
+ test_reserved_space3(ag * 16, ag * 4, false);
+
+ if (UseLargePages) {
+ size_t lp = os::large_page_size();
+
+ // Without large pages
+ test_reserved_space3(lp, ag * 4, false);
+ test_reserved_space3(lp * 2, ag * 4, false);
+ test_reserved_space3(lp * 4, ag * 4, false);
+ test_reserved_space3(lp, lp , false);
+ test_reserved_space3(lp * 2, lp , false);
+ test_reserved_space3(lp * 3, lp , false);
+ test_reserved_space3(lp * 2, lp * 2, false);
+ test_reserved_space3(lp * 4, lp * 2, false);
+ test_reserved_space3(lp * 8, lp * 2, false);
+
+ // With large pages
+ test_reserved_space3(lp, ag * 4 , true);
+ test_reserved_space3(lp * 2, ag * 4, true);
+ test_reserved_space3(lp * 4, ag * 4, true);
+ test_reserved_space3(lp, lp , true);
+ test_reserved_space3(lp * 2, lp , true);
+ test_reserved_space3(lp * 3, lp , true);
+ test_reserved_space3(lp * 2, lp * 2, true);
+ test_reserved_space3(lp * 4, lp * 2, true);
+ test_reserved_space3(lp * 8, lp * 2, true);
+ }
+ }
+
+ static void test_reserved_space() {
+ test_reserved_space1();
+ test_reserved_space2();
+ test_reserved_space3();
+ }
+};
+
+void TestReservedSpace_test() {
+ TestReservedSpace::test_reserved_space();
+}
+
+#endif // PRODUCT
+
#endif
--- a/hotspot/src/share/vm/runtime/virtualspace.hpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/runtime/virtualspace.hpp Mon Aug 26 07:01:23 2013 -0700
@@ -53,6 +53,7 @@
public:
// Constructor
+ ReservedSpace();
ReservedSpace(size_t size);
ReservedSpace(size_t size, size_t alignment, bool large,
char* requested_address = NULL,
--- a/hotspot/src/share/vm/services/memTracker.hpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/services/memTracker.hpp Mon Aug 26 07:01:23 2013 -0700
@@ -87,6 +87,8 @@
MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
static inline void record_virtual_memory_commit(address addr, size_t size,
address pc = 0, Thread* thread = NULL) { }
+ static inline void record_virtual_memory_release(address addr, size_t size,
+ Thread* thread = NULL) { }
static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
Thread* thread = NULL) { }
static inline Tracker get_realloc_tracker() { return _tkr; }
@@ -372,6 +374,13 @@
tkr.record(addr, size, flags, pc);
}
+ static inline void record_virtual_memory_release(address addr, size_t size,
+ Thread* thread = NULL) {
+ if (is_on()) {
+ Tracker tkr(Tracker::Release, thread);
+ tkr.record(addr, size);
+ }
+ }
// record memory type on virtual memory base address
static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp Mon Aug 26 09:33:01 2013 +0200
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp Mon Aug 26 07:01:23 2013 -0700
@@ -402,6 +402,14 @@
#define align_size_up_(size, alignment) (((size) + ((alignment) - 1)) & ~((alignment) - 1))
+inline bool is_size_aligned(size_t size, size_t alignment) {
+ return align_size_up_(size, alignment) == size;
+}
+
+inline bool is_ptr_aligned(void* ptr, size_t alignment) {
+ return align_size_up_((intptr_t)ptr, (intptr_t)alignment) == (intptr_t)ptr;
+}
+
inline intptr_t align_size_up(intptr_t size, intptr_t alignment) {
return align_size_up_(size, alignment);
}
@@ -414,6 +422,14 @@
#define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
+inline void* align_ptr_up(void* ptr, size_t alignment) {
+ return (void*)align_size_up((intptr_t)ptr, (intptr_t)alignment);
+}
+
+inline void* align_ptr_down(void* ptr, size_t alignment) {
+ return (void*)align_size_down((intptr_t)ptr, (intptr_t)alignment);
+}
+
// Align objects by rounding up their size, in HeapWord units.
#define align_object_size_(size) align_size_up_(size, MinObjAlignment)
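
is_size_aligned() and is_ptr_aligned() reuse the align_size_up_ mask trick, which is only valid for power-of-two alignments, and that is what every call site in this patch passes. A standalone sketch of the arithmetic, with the macro copied locally so the example compiles on its own:

    // Standalone sketch, not JVM code: the mask arithmetic behind the new
    // alignment helpers. Valid only for power-of-two alignments.
    #include <cassert>
    #include <cstddef>

    #define align_size_up_(size, alignment) \
      (((size) + ((alignment) - 1)) & ~((alignment) - 1))

    inline bool is_size_aligned(size_t size, size_t alignment) {
      return align_size_up_(size, alignment) == size;
    }

    int main() {
      assert(align_size_up_((size_t)5, (size_t)4) == 8);   // rounds up
      assert(align_size_up_((size_t)8, (size_t)4) == 8);   // unchanged when aligned
      assert(is_size_aligned(2 * 1024 * 1024, 4 * 1024));  // 2 MB is 4 KB aligned
      assert(!is_size_aligned(2 * 1024 * 1024 + 1, 4 * 1024));
      return 0;
    }
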