6716785: implicit null checks not triggering with CompressedOops
author coleenp
date Sat, 19 Jul 2008 17:38:22 -0400
changeset 823 9a5271881bc0
parent 817 cd8b8f500fac
child 824 ea3d0c81cee9
child 950 6112b627bb36
6716785: implicit null checks not triggering with CompressedOops
Summary: allocate alignment-sized page(s) below java heap so that memory accesses at heap_base+1page give signal and cause an implicit null check
Reviewed-by: kvn, jmasa, phh, jcoomes
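The fix, in outline: reserve the java heap with an extra alignment-sized prefix in front of it, make that prefix inaccessible, and let the compressed-oops base point at the prefix. A null narrow oop then decodes to an address inside the protected page(s), so the resulting SIGSEGV/access violation drives the normal implicit null check path. A minimal standalone sketch of the idea (POSIX/Linux only, not HotSpot code; the heap size and offsets are illustrative):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      const size_t page = (size_t)sysconf(_SC_PAGESIZE);
      const size_t heap_size = 64 * page;            // illustrative heap size

      // Reserve the "heap" plus one extra page below it.
      char* heap_base = (char*)mmap(NULL, heap_size + page,
                                    PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (heap_base == MAP_FAILED) return 1;

      // Make the first page inaccessible; objects live above it.
      if (mprotect(heap_base, page, PROT_NONE) != 0) return 1;

      // A null narrow oop decodes to heap_base, so a field load such as
      // heap_base[8] now lands in the protected page and raises SIGSEGV,
      // which the VM's signal handler can turn into a NullPointerException.
      std::printf("protected page at %p, heap starts at %p\n",
                  (void*)heap_base, (void*)(heap_base + page));
      return 0;
    }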
hotspot/src/os/linux/vm/os_linux.cpp
hotspot/src/os/solaris/vm/os_solaris.cpp
hotspot/src/os/windows/vm/os_windows.cpp
hotspot/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp
hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86_32.cpp
hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86_64.cpp
hotspot/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp
hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_32.cpp
hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_64.cpp
hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86_32.cpp
hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86_64.cpp
hotspot/src/share/vm/asm/assembler.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp
hotspot/src/share/vm/memory/genCollectedHeap.cpp
hotspot/src/share/vm/prims/jni.cpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/os.cpp
hotspot/src/share/vm/runtime/os.hpp
hotspot/src/share/vm/runtime/virtualspace.cpp
hotspot/src/share/vm/runtime/virtualspace.hpp
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -2414,8 +2414,20 @@
   return ::mprotect(bottom, size, prot) == 0;
 }
 
-bool os::protect_memory(char* addr, size_t size) {
-  return linux_mprotect(addr, size, PROT_READ);
+// Set protections specified
+bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
+                        bool is_committed) {
+  unsigned int p = 0;
+  switch (prot) {
+  case MEM_PROT_NONE: p = PROT_NONE; break;
+  case MEM_PROT_READ: p = PROT_READ; break;
+  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
+  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
+  default:
+    ShouldNotReachHere();
+  }
+  // is_committed is unused.
+  return linux_mprotect(addr, bytes, p);
 }
 
 bool os::guard_memory(char* addr, size_t size) {
@@ -3704,8 +3716,9 @@
 
 // Mark the polling page as readable
 void os::make_polling_page_readable(void) {
-  if( !protect_memory((char *)_polling_page, Linux::page_size()) )
+  if( !linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
     fatal("Could not enable polling page");
+  }
 };
 
 int os::active_processor_count() {
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -2965,10 +2965,21 @@
   return retVal == 0;
 }
 
-// Protect memory (make it read-only. (Used to pass readonly pages through
+// Protect memory (Used to pass readonly pages through
 // JNI GetArray<type>Elements with empty arrays.)
-bool os::protect_memory(char* addr, size_t bytes) {
-  return solaris_mprotect(addr, bytes, PROT_READ);
+bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
+                        bool is_committed) {
+  unsigned int p = 0;
+  switch (prot) {
+  case MEM_PROT_NONE: p = PROT_NONE; break;
+  case MEM_PROT_READ: p = PROT_READ; break;
+  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
+  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
+  default:
+    ShouldNotReachHere();
+  }
+  // is_committed is unused.
+  return solaris_mprotect(addr, bytes, p);
 }
 
 // guard_memory and unguard_memory only happens within stack guard pages.
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -2170,6 +2170,7 @@
             // Windows 98 reports faulting addresses incorrectly
             if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                 !os::win32::is_nt()) {
+
               return Handle_Exception(exceptionInfo,
                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL));
             }
@@ -2563,9 +2564,33 @@
   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
 }
 
-bool os::protect_memory(char* addr, size_t bytes) {
+// Set protections specified
+bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
+                        bool is_committed) {
+  unsigned int p = 0;
+  switch (prot) {
+  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
+  case MEM_PROT_READ: p = PAGE_READONLY; break;
+  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
+  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
+  default:
+    ShouldNotReachHere();
+  }
+
   DWORD old_status;
-  return VirtualProtect(addr, bytes, PAGE_READONLY, &old_status) != 0;
+
+  // Strangely enough, on Win32 one can change protection only for committed
+  // memory; not a big deal anyway, as bytes is at most 64K here.
+  if (!is_committed && !commit_memory(addr, bytes)) {
+    fatal("cannot commit protection page");
+  }
+  // One cannot use os::guard_memory() here, as on Win32 guard pages
+  // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
+  //
+  // Pages in the region become guard pages. Any attempt to access a guard page
+  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
+  // the guard page status. Guard pages thus act as a one-time access alarm.
+  return VirtualProtect(addr, bytes, p, &old_status) != 0;
 }
 
 bool os::guard_memory(char* addr, size_t bytes) {
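The commit-before-protect step above exists because VirtualProtect only operates on committed pages. A standalone Win32 illustration of that ordering (not HotSpot code, purely to show the API behaviour):

    #include <windows.h>
    #include <cstdio>

    int main() {
      SYSTEM_INFO si;
      GetSystemInfo(&si);
      const SIZE_T page = si.dwPageSize;

      // Reserve (but do not commit) one page.
      void* p = VirtualAlloc(NULL, page, MEM_RESERVE, PAGE_READWRITE);
      if (p == NULL) return 1;

      DWORD old;
      // Changing protection on reserved-but-uncommitted memory fails...
      if (!VirtualProtect(p, page, PAGE_NOACCESS, &old)) {
        std::printf("VirtualProtect on uncommitted memory failed (expected)\n");
      }
      // ...so the page has to be committed before its protection can change.
      if (VirtualAlloc(p, page, MEM_COMMIT, PAGE_READWRITE) == NULL) return 1;
      if (!VirtualProtect(p, page, PAGE_NOACCESS, &old)) return 1;
      std::printf("page committed and set to PAGE_NOACCESS\n");
      return 0;
    }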
--- a/hotspot/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -27,12 +27,6 @@
 
 #include <asm-sparc/traps.h>
 
-bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
-  // Since the linux kernel resides at the low end of
-  // user address space, no null pointer check is needed.
-  return offset < 0 || offset >= 0x100000;
-}
-
 void MacroAssembler::read_ccr_trap(Register ccr_save) {
   // No implementation
   breakpoint_trap();
--- a/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86_32.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86_32.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -39,10 +39,3 @@
 
   movptr(thread, tls);
 }
-
-bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
-  // Linux kernel guarantees that the first page is always unmapped. Don't
-  // assume anything more than that.
-  bool offset_in_first_page =   0 <= offset  &&  offset < os::vm_page_size();
-  return !offset_in_first_page;
-}
--- a/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86_64.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/assembler_linux_x86_64.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -65,22 +65,3 @@
        popq(rax);
    }
 }
-
-bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
-  // Exception handler checks the nmethod's implicit null checks table
-  // only when this method returns false.
-  if (UseCompressedOops) {
-    // The first page after heap_base is unmapped and
-    // the 'offset' is equal to [heap_base + offset] for
-    // narrow oop implicit null checks.
-    uintptr_t heap_base = (uintptr_t)Universe::heap_base();
-    if ((uintptr_t)offset >= heap_base) {
-      // Normalize offset for the next check.
-      offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
-    }
-  }
-  // Linux kernel guarantees that the first page is always unmapped. Don't
-  // assume anything more than that.
-  bool offset_in_first_page =   0 <= offset  &&  offset < os::vm_page_size();
-  return !offset_in_first_page;
-}
--- a/hotspot/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -28,18 +28,6 @@
 #include <sys/trap.h>          // For trap numbers
 #include <v9/sys/psr_compat.h> // For V8 compatibility
 
-bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
-  // The first page of virtual addresses is unmapped on SPARC.
-  // Thus, any access the VM makes through a null pointer with an offset of
-  // less than 4K will get a recognizable SIGSEGV, which the signal handler
-  // will transform into a NullPointerException.
-  // (Actually, the first 64K or so is unmapped, but it's simpler
-  // to depend only on the first 4K or so.)
-
-  bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size();
-  return !offset_in_first_page;
-}
-
 void MacroAssembler::read_ccr_trap(Register ccr_save) {
   // Execute a trap to get the PSR, mask and shift
   // to get the condition codes.
--- a/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_32.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_32.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -79,9 +79,3 @@
   if (thread != rax) popl(rax);
   popl(thread);
 }
-
-bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
-  // Identical to Sparc/Solaris code
-  bool offset_in_first_page =   0 <= offset  &&  offset < os::vm_page_size();
-  return !offset_in_first_page;
-}
--- a/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_64.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_64.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -85,22 +85,3 @@
     popq(rax);
   }
 }
-
-bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
-  // Identical to Sparc/Solaris code
-
-  // Exception handler checks the nmethod's implicit null checks table
-  // only when this method returns false.
-  if (UseCompressedOops) {
-    // The first page after heap_base is unmapped and
-    // the 'offset' is equal to [heap_base + offset] for
-    // narrow oop implicit null checks.
-    uintptr_t heap_base = (uintptr_t)Universe::heap_base();
-    if ((uintptr_t)offset >= heap_base) {
-      // Normalize offset for the next check.
-      offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
-    }
-  }
-  bool offset_in_first_page = 0 <= offset && offset < os::vm_page_size();
-  return !offset_in_first_page;
-}
--- a/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86_32.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86_32.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -58,7 +58,3 @@
          "Thread Pointer Offset has not been initialized");
   movl(thread, Address(thread, ThreadLocalStorage::get_thread_ptr_offset()));
 }
-
-bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
-  return offset < 0 || (int)os::vm_page_size() <= offset;
-}
--- a/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86_64.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/os_cpu/windows_x86/vm/assembler_windows_x86_64.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -65,19 +65,3 @@
        popq(rax);
    }
 }
-
-bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
-  // Exception handler checks the nmethod's implicit null checks table
-  // only when this method returns false.
-  if (UseCompressedOops) {
-    // The first page after heap_base is unmapped and
-    // the 'offset' is equal to [heap_base + offset] for
-    // narrow oop implicit null checks.
-    uintptr_t heap_base = (uintptr_t)Universe::heap_base();
-    if ((uintptr_t)offset >= heap_base) {
-      // Normalize offset for the next check.
-      offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
-    }
-  }
-  return offset < 0 || os::vm_page_size() <= offset;
-}
--- a/hotspot/src/share/vm/asm/assembler.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/asm/assembler.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -246,6 +246,24 @@
   }
 }
 
+bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
+  // Exception handler checks the nmethod's implicit null checks table
+  // only when this method returns false.
+#ifndef SPARC
+  // Sparc does not have based addressing
+  if (UseCompressedOops) {
+    // The first page after heap_base is unmapped and
+    // the 'offset' is equal to [heap_base + offset] for
+    // narrow oop implicit null checks.
+    uintptr_t heap_base = (uintptr_t)Universe::heap_base();
+    if ((uintptr_t)offset >= heap_base) {
+      // Normalize offset for the next check.
+      offset = (intptr_t)(pointer_delta((void*)offset, (void*)heap_base, 1));
+    }
+  }
+#endif // SPARC
+  return offset < 0 || os::vm_page_size() <= offset;
+}
 
 #ifndef PRODUCT
 void Label::print_instructions(MacroAssembler* masm) const {
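Read together with the heap layout above, the consolidated check behaves as in this simplified standalone model (heap_base, page size and offsets below are made-up example values, not taken from a running VM):

    #include <cstdint>
    #include <cstdio>

    // Simplified model of the consolidated check: an offset is handled by an
    // implicit (hardware) null check only if it falls inside the first page,
    // after being normalized against the compressed-oops heap base.
    static bool needs_explicit_null_check(intptr_t offset, uintptr_t heap_base,
                                          intptr_t page_size, bool compressed) {
      if (compressed && (uintptr_t)offset >= heap_base) {
        // A narrow-oop access through a null oop computes heap_base + offset,
        // so subtract heap_base to compare against the protected first page.
        offset = (intptr_t)((uintptr_t)offset - heap_base);
      }
      return offset < 0 || page_size <= offset;
    }

    int main() {
      const uintptr_t heap_base = 0x100000000ULL;  // illustrative
      const intptr_t page = 4096;

      // Plain null + small field offset: covered by the protected page at 0.
      std::printf("%d\n", needs_explicit_null_check(8, heap_base, page, false));
      // Narrow null oop: faulting address is heap_base + 8, still covered.
      std::printf("%d\n", needs_explicit_null_check((intptr_t)heap_base + 8,
                                                    heap_base, page, true));
      // Large offset: outside the protected page, needs an explicit check.
      std::printf("%d\n", needs_explicit_null_check(page + 16, heap_base,
                                                    page, false));
      return 0;
    }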
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -61,6 +61,8 @@
   if (_virtual_space != NULL) {
     delete _virtual_space;
     _virtual_space = NULL;
+    // Release memory reserved in the space.
+    rs.release();
   }
   return false;
 }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -108,8 +108,8 @@
   // size than is needed or wanted for the perm gen.  Use the "compound
   // alignment" ReservedSpace ctor to avoid having to use the same page size for
   // all gens.
-  ReservedSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
-                        og_align);
+  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
+                            og_align);
   os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                        heap_rs.base(), pg_max_size);
   os::trace_page_sizes("ps main", og_min_size + yg_min_size,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -422,6 +422,8 @@
       return vspace;
     }
     delete vspace;
+    // Release memory reserved in the space.
+    rs.release();
   }
 
   return 0;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -71,13 +71,8 @@
 
 void PSVirtualSpace::release() {
   DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this));
-  if (reserved_low_addr() != NULL) {
-    if (special()) {
-      os::release_memory_special(reserved_low_addr(), reserved_size());
-    } else {
-      (void)os::release_memory(reserved_low_addr(), reserved_size());
-    }
-  }
+  // This may not release memory it didn't reserve.
+  // Use rs.release() to release the underlying memory instead.
   _reserved_low_addr = _reserved_high_addr = NULL;
   _committed_low_addr = _committed_high_addr = NULL;
   _special = false;
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -222,8 +222,8 @@
 
   *_total_reserved = total_reserved;
   *_n_covered_regions = n_covered_regions;
-  *heap_rs = ReservedSpace(total_reserved, alignment,
-                           UseLargePages, heap_address);
+  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
+                               UseLargePages, heap_address);
 
   return heap_address;
 }
--- a/hotspot/src/share/vm/prims/jni.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/prims/jni.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -2173,8 +2173,7 @@
     size_t size = os::vm_allocation_granularity();
     bad_address = os::reserve_memory(size);
     if (bad_address != NULL) {
-      os::commit_memory(bad_address, size);
-      os::protect_memory(bad_address, size);
+      os::protect_memory(bad_address, size, os::MEM_PROT_READ);
     }
   }
   return bad_address;
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -1176,9 +1176,7 @@
   // by ergonomics.
   if (MaxHeapSize <= max_heap_for_compressed_oops()) {
     if (FLAG_IS_DEFAULT(UseCompressedOops)) {
-      // Leave compressed oops off by default. Uncomment
-      // the following line to return it to default status.
-      // FLAG_SET_ERGO(bool, UseCompressedOops, true);
+      FLAG_SET_ERGO(bool, UseCompressedOops, true);
     }
   } else {
     if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
--- a/hotspot/src/share/vm/runtime/os.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/runtime/os.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -922,8 +922,9 @@
   // time and expensive page trap spinning, 'SerializePageLock' is used to block
   // the mutator thread if such case is encountered. See bug 6546278 for details.
   Thread::muxAcquire(&SerializePageLock, "serialize_thread_states");
-  os::protect_memory( (char *)os::get_memory_serialize_page(), os::vm_page_size() );
-  os::unguard_memory( (char *)os::get_memory_serialize_page(), os::vm_page_size() );
+  os::protect_memory((char *)os::get_memory_serialize_page(),
+                     os::vm_page_size(), MEM_PROT_READ, /*is_committed*/true );
+  os::unguard_memory((char *)os::get_memory_serialize_page(), os::vm_page_size());
   Thread::muxRelease(&SerializePageLock);
 }
 
--- a/hotspot/src/share/vm/runtime/os.hpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/runtime/os.hpp	Sat Jul 19 17:38:22 2008 -0400
@@ -193,7 +193,11 @@
   static bool   commit_memory(char* addr, size_t size, size_t alignment_hint);
   static bool   uncommit_memory(char* addr, size_t bytes);
   static bool   release_memory(char* addr, size_t bytes);
-  static bool   protect_memory(char* addr, size_t bytes);
+
+  enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
+  static bool   protect_memory(char* addr, size_t bytes, ProtType prot,
+                               bool is_committed = false);
+
   static bool   guard_memory(char* addr, size_t bytes);
   static bool   unguard_memory(char* addr, size_t bytes);
   static char*  map_memory(int fd, const char* file_name, size_t file_offset,
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Sat Jul 19 17:38:22 2008 -0400
@@ -28,12 +28,15 @@
 
 // ReservedSpace
 ReservedSpace::ReservedSpace(size_t size) {
-  initialize(size, 0, false, NULL);
+  initialize(size, 0, false, NULL, 0);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
-                             bool large, char* requested_address) {
-  initialize(size, alignment, large, requested_address);
+                             bool large,
+                             char* requested_address,
+                             const size_t noaccess_prefix) {
+  initialize(size+noaccess_prefix, alignment, large, requested_address,
+             noaccess_prefix);
 }
 
 char *
@@ -105,7 +108,8 @@
 ReservedSpace::ReservedSpace(const size_t prefix_size,
                              const size_t prefix_align,
                              const size_t suffix_size,
-                             const size_t suffix_align)
+                             const size_t suffix_align,
+                             const size_t noaccess_prefix)
 {
   assert(prefix_size != 0, "sanity");
   assert(prefix_align != 0, "sanity");
@@ -118,12 +122,16 @@
   assert((suffix_align & prefix_align - 1) == 0,
     "suffix_align not divisible by prefix_align");
 
+  // Add the noaccess_prefix to prefix_size.
+  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
+  const size_t size = adjusted_prefix_size + suffix_size;
+
   // On systems where the entire region has to be reserved and committed up
   // front, the compound alignment normally done by this method is unnecessary.
   const bool try_reserve_special = UseLargePages &&
     prefix_align == os::large_page_size();
   if (!os::can_commit_large_page_memory() && try_reserve_special) {
-    initialize(prefix_size + suffix_size, prefix_align, true);
+    initialize(size, prefix_align, true, NULL, noaccess_prefix);
     return;
   }
 
@@ -131,15 +139,19 @@
   _size = 0;
   _alignment = 0;
   _special = false;
+  _noaccess_prefix = 0;
+
+  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == prefix_align, "noaccess prefix wrong");
 
   // Optimistically try to reserve the exact size needed.
-  const size_t size = prefix_size + suffix_size;
   char* addr = os::reserve_memory(size, NULL, prefix_align);
   if (addr == NULL) return;
 
   // Check whether the result has the needed alignment (unlikely unless
   // prefix_align == suffix_align).
-  const size_t ofs = size_t(addr) + prefix_size & suffix_align - 1;
+  const size_t ofs = size_t(addr) + adjusted_prefix_size & suffix_align - 1;
   if (ofs != 0) {
     // Wrong alignment.  Release, allocate more space and do manual alignment.
     //
@@ -153,11 +165,11 @@
     }
 
     const size_t extra = MAX2(ofs, suffix_align - ofs);
-    addr = reserve_and_align(size + extra, prefix_size, prefix_align,
+    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                              suffix_size, suffix_align);
     if (addr == NULL) {
       // Try an even larger region.  If this fails, address space is exhausted.
-      addr = reserve_and_align(size + suffix_align, prefix_size,
+      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                                prefix_align, suffix_size, suffix_align);
     }
   }
@@ -165,10 +177,12 @@
   _base = addr;
   _size = size;
   _alignment = prefix_align;
+  _noaccess_prefix = noaccess_prefix;
 }
 
 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
-                               char* requested_address) {
+                               char* requested_address,
+                               const size_t noaccess_prefix) {
   const size_t granularity = os::vm_allocation_granularity();
   assert((size & granularity - 1) == 0,
          "size not aligned to os::vm_allocation_granularity()");
@@ -181,6 +195,7 @@
   _size = 0;
   _special = false;
   _alignment = 0;
+  _noaccess_prefix = 0;
   if (size == 0) {
     return;
   }
@@ -220,7 +235,8 @@
     // important.  If available space is not detected, return NULL.
 
     if (requested_address != 0) {
-      base = os::attempt_reserve_memory_at(size, requested_address);
+      base = os::attempt_reserve_memory_at(size,
+                                           requested_address-noaccess_prefix);
     } else {
       base = os::reserve_memory(size, NULL, alignment);
     }
@@ -259,6 +275,11 @@
   _base = base;
   _size = size;
   _alignment = MAX2(alignment, (size_t) os::vm_page_size());
+  _noaccess_prefix = noaccess_prefix;
+
+  // Assert that if noaccess_prefix is used, it is the same as alignment.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == _alignment, "noaccess prefix wrong");
 
   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
          "area must be distinguisable from marks for mark-sweep");
@@ -274,6 +295,7 @@
   _base = base;
   _size = size;
   _alignment = alignment;
+  _noaccess_prefix = 0;
   _special = special;
 }
 
@@ -320,17 +342,58 @@
 
 void ReservedSpace::release() {
   if (is_reserved()) {
+    char *real_base = _base - _noaccess_prefix;
+    const size_t real_size = _size + _noaccess_prefix;
     if (special()) {
-      os::release_memory_special(_base, _size);
+      os::release_memory_special(real_base, real_size);
     } else{
-      os::release_memory(_base, _size);
+      os::release_memory(real_base, real_size);
     }
     _base = NULL;
     _size = 0;
+    _noaccess_prefix = 0;
     _special = false;
   }
 }
 
+void ReservedSpace::protect_noaccess_prefix(const size_t size) {
+  // If there is no noaccess prefix, return.
+  if (_noaccess_prefix == 0) return;
+
+  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
+         "must be at least page size big");
+
+  // Protect memory at the base of the allocated region.
+  // If special, the page was committed (only matters on windows)
+  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
+                          _special)) {
+    fatal("cannot protect protection page");
+  }
+
+  _base += _noaccess_prefix;
+  _size -= _noaccess_prefix;
+  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
+         "must be exactly of required size and alignment");
+}
+
+ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
+                                     bool large, char* requested_address) :
+  ReservedSpace(size, alignment, large,
+                requested_address,
+                UseCompressedOops ? lcm(os::vm_page_size(), alignment) : 0) {
+  // Only reserved space for the java heap should have a noaccess_prefix
+  // if using compressed oops.
+  protect_noaccess_prefix(size);
+}
+
+ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
+                                     const size_t prefix_align,
+                                     const size_t suffix_size,
+                                     const size_t suffix_align) :
+  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
+                UseCompressedOops ? lcm(os::vm_page_size(), prefix_align) : 0) {
+  protect_noaccess_prefix(prefix_size+suffix_size);
+}
 
 // VirtualSpace
 
@@ -348,6 +411,7 @@
   _lower_alignment        = 0;
   _middle_alignment       = 0;
   _upper_alignment        = 0;
+  _special                = false;
 }
 
 
@@ -402,7 +466,8 @@
 
 
 void VirtualSpace::release() {
-  (void)os::release_memory(low_boundary(), reserved_size());
+  // This does not release memory it never reserved.
+  // Caller must release via rs.release();
   _low_boundary           = NULL;
   _high_boundary          = NULL;
   _low                    = NULL;
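The net effect of the ReservedHeapSpace/protect_noaccess_prefix changes can be modelled in isolation as follows (a POSIX-only sketch with invented names such as ToyReservedHeap; the real code goes through os::reserve_memory/os::protect_memory and additionally handles large pages, alignment and a requested address):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdio>

    // Toy model: reserve prefix + heap in one mapping, protect the prefix,
    // expose only the part above it, and release the whole thing later.
    struct ToyReservedHeap {
      char*  base;     // start of the usable heap (above the prefix)
      size_t size;     // usable heap size
      size_t prefix;   // size of the PROT_NONE region below the heap

      bool reserve(size_t heap_size, size_t noaccess_prefix) {
        char* raw = (char*)mmap(NULL, heap_size + noaccess_prefix,
                                PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (raw == MAP_FAILED) return false;
        if (noaccess_prefix != 0 &&
            mprotect(raw, noaccess_prefix, PROT_NONE) != 0) {
          munmap(raw, heap_size + noaccess_prefix);
          return false;
        }
        base   = raw + noaccess_prefix;   // heap starts above the prefix
        size   = heap_size;
        prefix = noaccess_prefix;
        return true;
      }

      void release() {
        // Release the whole reservation, including the hidden prefix,
        // mirroring ReservedSpace::release() using base - noaccess_prefix.
        munmap(base - prefix, size + prefix);
        base = NULL; size = prefix = 0;
      }
    };

    int main() {
      const size_t page = (size_t)sysconf(_SC_PAGESIZE);
      ToyReservedHeap rs;
      if (!rs.reserve(64 * page, page)) return 1;
      std::printf("heap at %p, %zu bytes, %zu-byte no-access prefix below\n",
                  (void*)rs.base, rs.size, rs.prefix);
      rs.release();
      return 0;
    }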
--- a/hotspot/src/share/vm/runtime/virtualspace.hpp	Wed Jul 05 16:39:00 2017 +0200
+++ b/hotspot/src/share/vm/runtime/virtualspace.hpp	Sat Jul 19 17:38:22 2008 -0400
@@ -29,13 +29,15 @@
  private:
   char*  _base;
   size_t _size;
+  size_t _noaccess_prefix;
   size_t _alignment;
   bool   _special;
 
   // ReservedSpace
   ReservedSpace(char* base, size_t size, size_t alignment, bool special);
   void initialize(size_t size, size_t alignment, bool large,
-                  char* requested_address = NULL);
+                  char* requested_address,
+                  const size_t noaccess_prefix);
 
   // Release parts of an already-reserved memory region [addr, addr + len) to
   // get a new region that has "compound alignment."  Return the start of the
@@ -59,13 +61,19 @@
                           const size_t suffix_size,
                           const size_t suffix_align);
 
+ protected:
+  // Create protection page at the beginning of the space.
+  void protect_noaccess_prefix(const size_t size);
+
  public:
   // Constructor
   ReservedSpace(size_t size);
   ReservedSpace(size_t size, size_t alignment, bool large,
-                char* requested_address = NULL);
+                char* requested_address = NULL,
+                const size_t noaccess_prefix = 0);
   ReservedSpace(const size_t prefix_size, const size_t prefix_align,
-                const size_t suffix_size, const size_t suffix_align);
+                const size_t suffix_size, const size_t suffix_align,
+                const size_t noaccess_prefix);
 
   // Accessors
   char*  base()      const { return _base;      }
@@ -73,6 +81,8 @@
   size_t alignment() const { return _alignment; }
   bool   special()   const { return _special;   }
 
+  size_t noaccess_prefix()   const { return _noaccess_prefix;   }
+
   bool is_reserved() const { return _base != NULL; }
   void release();
 
@@ -104,6 +114,16 @@
   return last_part(partition_size, alignment());
 }
 
+// Class encapsulating behavior specific to memory space reserved for the Java heap
+class ReservedHeapSpace : public ReservedSpace {
+public:
+  // Constructor
+  ReservedHeapSpace(size_t size, size_t forced_base_alignment,
+                    bool large, char* requested_address);
+  ReservedHeapSpace(const size_t prefix_size, const size_t prefix_align,
+                    const size_t suffix_size, const size_t suffix_align);
+};
+
 // VirtualSpace is data structure for committing a previously reserved address range in smaller chunks.
 
 class VirtualSpace VALUE_OBJ_CLASS_SPEC {