6541756: Reduce executable C-heap
author coleenp
Wed, 25 Mar 2009 14:19:20 -0400
changeset 2268 bea8be80ec88
parent 2267 2e18a3d0155b
child 2270 2d1738b808cb
6541756: Reduce executable C-heap
Summary: Add executable parameters to reserve_memory and commit_memory to reduce executable memory to only the Code Heap.
Reviewed-by: xlu, kvn, acorn
hotspot/src/os/linux/vm/os_linux.cpp
hotspot/src/os/solaris/vm/os_solaris.cpp
hotspot/src/os/windows/vm/os_windows.cpp
hotspot/src/share/vm/memory/heap.cpp
hotspot/src/share/vm/runtime/os.hpp
hotspot/src/share/vm/runtime/virtualspace.cpp
hotspot/src/share/vm/runtime/virtualspace.hpp
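
Before this change, every region HotSpot reserved or committed through the os:: layer was mapped readable, writable and executable; after it, PROT_EXEC (PAGE_EXECUTE_* on Windows) is requested only for the code cache. A minimal standalone Linux sketch of the resulting commit policy (illustrative names only, not HotSpot code):

// Sketch of the commit policy this patch introduces: reserve address space
// with no access, and request PROT_EXEC at commit time only for the region
// that will hold generated code.  Builds with g++ on Linux.
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

// Hypothetical helper mirroring the new os::commit_memory(addr, size, exec):
// executable memory is the exception, not the default.
static bool commit(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  void* res = mmap(addr, size, prot,
                   MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
  return res != MAP_FAILED;
}

int main() {
  const size_t sz = 1 << 20;
  // Reserve without any access; this stands in for the reserve side of the
  // os:: layer after the change.
  char* data = (char*) mmap(NULL, sz, PROT_NONE,
                            MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
  char* code = (char*) mmap(NULL, sz, PROT_NONE,
                            MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
  if (data == MAP_FAILED || code == MAP_FAILED) return 1;
  // Only the "code cache" region is committed executable.
  if (!commit(data, sz, /*exec*/ false)) return 1;
  if (!commit(code, sz, /*exec*/ true))  return 1;
  printf("data committed RW, code committed RWX\n");
  return 0;
}
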
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Mon Mar 23 10:42:20 2009 -0400
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Mar 25 14:19:20 2009 -0400
@@ -2269,15 +2269,16 @@
 //       All it does is to check if there are enough free pages
 //       left at the time of mmap(). This could be a potential
 //       problem.
-bool os::commit_memory(char* addr, size_t size) {
-  uintptr_t res = (uintptr_t) ::mmap(addr, size,
-                                   PROT_READ|PROT_WRITE|PROT_EXEC,
+bool os::commit_memory(char* addr, size_t size, bool exec) {
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+  uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                    MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
   return res != (uintptr_t) MAP_FAILED;
 }
 
-bool os::commit_memory(char* addr, size_t size, size_t alignment_hint) {
-  return commit_memory(addr, size);
+bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
+                       bool exec) {
+  return commit_memory(addr, size, exec);
 }
 
 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
@@ -2417,8 +2418,7 @@
 unsigned long* os::Linux::_numa_all_nodes;
 
 bool os::uncommit_memory(char* addr, size_t size) {
-  return ::mmap(addr, size,
-                PROT_READ|PROT_WRITE|PROT_EXEC,
+  return ::mmap(addr, size, PROT_NONE,
                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0)
     != MAP_FAILED;
 }
@@ -2441,7 +2441,9 @@
     flags |= MAP_FIXED;
   }
 
-  addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE|PROT_EXEC,
+  // Map uncommitted pages PROT_READ and PROT_WRITE, change access
+  // to PROT_EXEC if executable when we commit the page.
+  addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE,
                        flags, -1, 0);
 
   if (addr != MAP_FAILED) {
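
The comment above states the Linux strategy: reserve address space read/write only and add execute permission only when an executable region is committed. HotSpot performs the commit by re-mmap'ing with MAP_FIXED (see commit_memory above); the same widening could equally be expressed with mprotect(2), as in this minimal standalone illustration (not the code path the patch takes):

#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t page = (size_t) sysconf(_SC_PAGESIZE);
  // Reserved pages start out read/write (no execute), matching the new
  // behaviour of anon_mmap() above.
  char* p = (char*) mmap(NULL, page, PROT_READ|PROT_WRITE,
                         MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
  if (p == MAP_FAILED) return 1;
  // At commit time, widen the protection only if this region is meant to
  // hold generated code (the caller decides).
  bool executable = true;
  if (executable && mprotect(p, page, PROT_READ|PROT_WRITE|PROT_EXEC) != 0) {
    return 1;
  }
  return 0;
}
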
@@ -2582,7 +2584,9 @@
 #define SHM_HUGETLB 04000
 #endif
 
-char* os::reserve_memory_special(size_t bytes, char* req_addr) {
+char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+  // "exec" is passed in but not used.  Creating the shared image for
+  // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages, "only for large pages");
 
   key_t key = IPC_PRIVATE;
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Mon Mar 23 10:42:20 2009 -0400
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Mar 25 14:19:20 2009 -0400
@@ -2623,15 +2623,16 @@
   return page_size;
 }
 
-bool os::commit_memory(char* addr, size_t bytes) {
+bool os::commit_memory(char* addr, size_t bytes, bool exec) {
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   size_t size = bytes;
   return
-     NULL != Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED,
-                                 PROT_READ | PROT_WRITE | PROT_EXEC);
-}
-
-bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint) {
-  if (commit_memory(addr, bytes)) {
+     NULL != Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
+}
+
+bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
+                       bool exec) {
+  if (commit_memory(addr, bytes, exec)) {
     if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
       // If the large page size has been set and the VM
       // is using large pages, use the large page size
@@ -3220,7 +3221,9 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes, char* addr) {
+char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
+  // "exec" is passed in but not used.  Creating the shared image for
+  // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseISM, "only for ISM large pages");
 
   size_t size = bytes;
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Mon Mar 23 10:42:20 2009 -0400
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Mar 25 14:19:20 2009 -0400
@@ -2189,7 +2189,8 @@
           if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
                   addr = (address)((uintptr_t)addr &
                          (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
-                  os::commit_memory( (char *)addr, thread->stack_base() - addr );
+                  os::commit_memory((char *)addr, thread->stack_base() - addr,
+                                    false );
                   return EXCEPTION_CONTINUE_EXECUTION;
           }
           else
@@ -2565,8 +2566,7 @@
   assert((size_t)addr % os::vm_allocation_granularity() == 0,
          "reserve alignment");
   assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
-  char* res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE,
-                                  PAGE_EXECUTE_READWRITE);
+  char* res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
   assert(res == NULL || addr == NULL || addr == res,
          "Unexpected address from reserve.");
   return res;
@@ -2595,7 +2595,7 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t bytes, char* addr) {
+char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
 
   if (UseLargePagesIndividualAllocation) {
     if (TracePageSizes && Verbose) {
@@ -2618,7 +2618,7 @@
     p_buf = (char *) VirtualAlloc(addr,
                                  size_of_reserve,  // size of Reserve
                                  MEM_RESERVE,
-                                 PAGE_EXECUTE_READWRITE);
+                                 PAGE_READWRITE);
     // If reservation failed, return NULL
     if (p_buf == NULL) return NULL;
 
@@ -2659,7 +2659,13 @@
         p_new = (char *) VirtualAlloc(next_alloc_addr,
                                     bytes_to_rq,
                                     MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
-                                    PAGE_EXECUTE_READWRITE);
+                                    PAGE_READWRITE);
+        if (p_new != NULL && exec) {
+          DWORD oldprot;
+          // Windows doc says to use VirtualProtect to get execute permissions
+          VirtualProtect(next_alloc_addr, bytes_to_rq,
+                         PAGE_EXECUTE_READWRITE, &oldprot);
+        }
       }
 
       if (p_new == NULL) {
@@ -2688,10 +2694,12 @@
   } else {
     // normal policy just allocate it all at once
     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
-    char * res = (char *)VirtualAlloc(NULL,
-                                      bytes,
-                                      flag,
-                                      PAGE_EXECUTE_READWRITE);
+    char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_READWRITE);
+    if (res != NULL && exec) {
+      DWORD oldprot;
+      // Windows doc says to use VirtualProtect to get execute permissions
+      VirtualProtect(res, bytes, PAGE_EXECUTE_READWRITE, &oldprot);
+    }
     return res;
   }
 }
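
On Windows the large-page path no longer simply asks for PAGE_EXECUTE_READWRITE; it reserves and commits PAGE_READWRITE and, when the caller requested executable memory, upgrades the protection with VirtualProtect, as the comments above note. A standalone Win32 sketch of that commit-then-protect pattern (illustrative helper, not HotSpot code):

// Commit read/write pages and upgrade to PAGE_EXECUTE_READWRITE only when
// executable memory was asked for.
#include <windows.h>
#include <cstdio>

static char* commit(size_t bytes, bool exec) {
  char* p = (char*) VirtualAlloc(NULL, bytes, MEM_RESERVE | MEM_COMMIT,
                                 PAGE_READWRITE);
  if (p != NULL && exec) {
    DWORD oldprot;
    // The Windows documentation recommends VirtualProtect to change the
    // protection of an already-committed region.
    if (!VirtualProtect(p, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
      VirtualFree(p, 0, MEM_RELEASE);
      return NULL;
    }
  }
  return p;
}

int main() {
  char* data = commit(1 << 16, false);  // ordinary C-heap style region
  char* code = commit(1 << 16, true);   // code-cache style region
  printf("data=%p code=%p\n", (void*) data, (void*) code);
  return 0;
}
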
@@ -2703,7 +2711,7 @@
 void os::print_statistics() {
 }
 
-bool os::commit_memory(char* addr, size_t bytes) {
+bool os::commit_memory(char* addr, size_t bytes, bool exec) {
   if (bytes == 0) {
     // Don't bother the OS with noops.
     return true;
@@ -2712,11 +2720,19 @@
   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
   // Don't attempt to print anything if the OS call fails. We're
   // probably low on resources, so the print itself may cause crashes.
-  return VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_EXECUTE_READWRITE) != NULL;
+  bool result = VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) != 0;
+  if (result && exec) {
+    DWORD oldprot;
+    // Windows doc says to use VirtualProtect to get execute permissions
+    return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot) != 0;
+  } else {
+    return result;
+  }
 }
 
-bool os::commit_memory(char* addr, size_t size, size_t alignment_hint) {
-  return commit_memory(addr, size);
+bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
+                       bool exec) {
+  return commit_memory(addr, size, exec);
 }
 
 bool os::uncommit_memory(char* addr, size_t bytes) {
@@ -2750,7 +2766,7 @@
 
   // Strange enough, but on Win32 one can change protection only for committed
   // memory, not a big deal anyway, as bytes less or equal than 64K
-  if (!is_committed && !commit_memory(addr, bytes)) {
+  if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
     fatal("cannot commit protection page");
   }
   // One cannot use os::guard_memory() here, as on Win32 guard page
@@ -3248,10 +3264,10 @@
 #endif
 
   if (!UseMembar) {
-    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_EXECUTE_READWRITE);
+    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
     guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
 
-    return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_EXECUTE_READWRITE);
+    return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
     guarantee( return_page != NULL, "Commit Failed for memory serialize page");
 
     os::set_memory_serialize_page( mem_serialize_page );
--- a/hotspot/src/share/vm/memory/heap.cpp	Mon Mar 23 10:42:20 2009 -0400
+++ b/hotspot/src/share/vm/memory/heap.cpp	Wed Mar 25 14:19:20 2009 -0400
@@ -112,7 +112,7 @@
 
   const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
     MAX2(page_size, granularity);
-  ReservedSpace rs(r_size, rs_align, rs_align > 0);
+  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
   os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
                        rs.base(), rs.size());
   if (!_memory.initialize(rs, c_size)) {
--- a/hotspot/src/share/vm/runtime/os.hpp	Mon Mar 23 10:42:20 2009 -0400
+++ b/hotspot/src/share/vm/runtime/os.hpp	Wed Mar 25 14:19:20 2009 -0400
@@ -202,8 +202,10 @@
   static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
   static void   split_reserved_memory(char *base, size_t size,
                                       size_t split, bool realloc);
-  static bool   commit_memory(char* addr, size_t bytes);
-  static bool   commit_memory(char* addr, size_t size, size_t alignment_hint);
+  static bool   commit_memory(char* addr, size_t bytes,
+                              bool executable = false);
+  static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
+                              bool executable = false);
   static bool   uncommit_memory(char* addr, size_t bytes);
   static bool   release_memory(char* addr, size_t bytes);
 
@@ -243,7 +245,8 @@
 
   static char*  non_memory_address_word();
   // reserve, commit and pin the entire memory region
-  static char*  reserve_memory_special(size_t size, char* addr = NULL);
+  static char*  reserve_memory_special(size_t size, char* addr = NULL,
+                bool executable = false);
   static bool   release_memory_special(char* addr, size_t bytes);
   static bool   large_page_init();
   static size_t large_page_size();
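
Because both commit_memory overloads and reserve_memory_special default executable to false, existing call sites compile unchanged and now receive non-executable pages; only the code-cache path opts in. A trivial standalone illustration of that opt-in default (hypothetical function, not the real os:: declaration):

#include <cstddef>
#include <cstdio>

// Mirrors the shape of the new declarations: callers that do not care about
// executability get non-executable memory by default.
static bool commit_memory(const char* what, size_t bytes,
                          bool executable = false) {
  printf("commit %lu bytes for %s as %s\n",
         (unsigned long) bytes, what, executable ? "RWX" : "RW");
  return true;
}

int main() {
  commit_memory("java heap", 64 * 1024 * 1024);        // defaults to RW
  commit_memory("thread stack guard", 4096);           // defaults to RW
  commit_memory("code cache", 32 * 1024 * 1024, true); // explicit RWX
  return 0;
}
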
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Mon Mar 23 10:42:20 2009 -0400
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Wed Mar 25 14:19:20 2009 -0400
@@ -28,7 +28,7 @@
 
 // ReservedSpace
 ReservedSpace::ReservedSpace(size_t size) {
-  initialize(size, 0, false, NULL, 0);
+  initialize(size, 0, false, NULL, 0, false);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
@@ -36,7 +36,13 @@
                              char* requested_address,
                              const size_t noaccess_prefix) {
   initialize(size+noaccess_prefix, alignment, large, requested_address,
-             noaccess_prefix);
+             noaccess_prefix, false);
+}
+
+ReservedSpace::ReservedSpace(size_t size, size_t alignment,
+                             bool large,
+                             bool executable) {
+  initialize(size, alignment, large, NULL, 0, executable);
 }
 
 char *
@@ -132,7 +138,8 @@
   const bool try_reserve_special = UseLargePages &&
     prefix_align == os::large_page_size();
   if (!os::can_commit_large_page_memory() && try_reserve_special) {
-    initialize(size, prefix_align, true, requested_address, noaccess_prefix);
+    initialize(size, prefix_align, true, requested_address, noaccess_prefix,
+               false);
     return;
   }
 
@@ -141,6 +148,7 @@
   _alignment = 0;
   _special = false;
   _noaccess_prefix = 0;
+  _executable = false;
 
   // Assert that if noaccess_prefix is used, it is the same as prefix_align.
   assert(noaccess_prefix == 0 ||
@@ -189,7 +197,8 @@
 
 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                                char* requested_address,
-                               const size_t noaccess_prefix) {
+                               const size_t noaccess_prefix,
+                               bool executable) {
   const size_t granularity = os::vm_allocation_granularity();
   assert((size & granularity - 1) == 0,
          "size not aligned to os::vm_allocation_granularity()");
@@ -201,6 +210,7 @@
   _base = NULL;
   _size = 0;
   _special = false;
+  _executable = executable;
   _alignment = 0;
   _noaccess_prefix = 0;
   if (size == 0) {
@@ -214,7 +224,7 @@
 
   if (special) {
 
-    base = os::reserve_memory_special(size, requested_address);
+    base = os::reserve_memory_special(size, requested_address, executable);
 
     if (base != NULL) {
       // Check alignment constraints
@@ -284,7 +294,7 @@
 
 
 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
-                             bool special) {
+                             bool special, bool executable) {
   assert((size % os::vm_allocation_granularity()) == 0,
          "size not allocation aligned");
   _base = base;
@@ -292,6 +302,7 @@
   _alignment = alignment;
   _noaccess_prefix = 0;
   _special = special;
+  _executable = executable;
 }
 
 
@@ -299,9 +310,10 @@
                                         bool split, bool realloc) {
   assert(partition_size <= size(), "partition failed");
   if (split) {
-    os::split_reserved_memory(_base, _size, partition_size, realloc);
+    os::split_reserved_memory(base(), size(), partition_size, realloc);
   }
-  ReservedSpace result(base(), partition_size, alignment, special());
+  ReservedSpace result(base(), partition_size, alignment, special(),
+                       executable());
   return result;
 }
 
@@ -310,7 +322,7 @@
 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
   assert(partition_size <= size(), "partition failed");
   ReservedSpace result(base() + partition_size, size() - partition_size,
-                       alignment, special());
+                       alignment, special(), executable());
   return result;
 }
 
@@ -348,6 +360,7 @@
     _size = 0;
     _noaccess_prefix = 0;
     _special = false;
+    _executable = false;
   }
 }
 
@@ -396,6 +409,14 @@
   protect_noaccess_prefix(prefix_size+suffix_size);
 }
 
+// Reserve space for code segment.  Same as Java heap only we mark this as
+// executable.
+ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
+                                     size_t rs_align,
+                                     bool large) :
+  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
+}
+
 // VirtualSpace
 
 VirtualSpace::VirtualSpace() {
@@ -413,6 +434,7 @@
   _middle_alignment       = 0;
   _upper_alignment        = 0;
   _special                = false;
+  _executable             = false;
 }
 
 
@@ -426,6 +448,7 @@
   _high = low();
 
   _special = rs.special();
+  _executable = rs.executable();
 
   // When a VirtualSpace begins life at a large size, make all future expansion
   // and shrinking occur aligned to a granularity of large pages.  This avoids
@@ -483,6 +506,7 @@
   _middle_alignment       = 0;
   _upper_alignment        = 0;
   _special                = false;
+  _executable             = false;
 }
 
 
@@ -592,7 +616,7 @@
     assert(low_boundary() <= lower_high() &&
            lower_high() + lower_needs <= lower_high_boundary(),
            "must not expand beyond region");
-    if (!os::commit_memory(lower_high(), lower_needs)) {
+    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
       debug_only(warning("os::commit_memory failed"));
       return false;
     } else {
@@ -603,7 +627,8 @@
     assert(lower_high_boundary() <= middle_high() &&
            middle_high() + middle_needs <= middle_high_boundary(),
            "must not expand beyond region");
-    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) {
+    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
+                           _executable)) {
       debug_only(warning("os::commit_memory failed"));
       return false;
     }
@@ -613,7 +638,7 @@
     assert(middle_high_boundary() <= upper_high() &&
            upper_high() + upper_needs <= upper_high_boundary(),
            "must not expand beyond region");
-    if (!os::commit_memory(upper_high(), upper_needs)) {
+    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
       debug_only(warning("os::commit_memory failed"));
       return false;
     } else {
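
Taken together, the virtualspace.cpp changes record the executable flag when a VirtualSpace is initialized from its ReservedSpace and hand it to every subsequent os::commit_memory call as the lower, middle and upper regions grow. A standalone sketch of that shape, using raw mmap in place of the os:: layer (class and member names are illustrative, not HotSpot's):

#include <sys/mman.h>
#include <cstddef>

class GrowableSpace {
  char*  _base;
  size_t _reserved;
  size_t _committed;
  bool   _executable;   // decided at reservation time, used at commit time
 public:
  bool initialize(size_t reserved, bool executable) {
    _base = (char*) mmap(NULL, reserved, PROT_NONE,
                         MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
    _reserved   = reserved;
    _committed  = 0;
    _executable = executable;
    return _base != MAP_FAILED;
  }
  // Commit more memory at the high-water mark, executable only if the whole
  // space was created executable (the code-cache case).
  bool expand_by(size_t bytes) {
    if (_committed + bytes > _reserved) return false;
    int prot = _executable ? PROT_READ|PROT_WRITE|PROT_EXEC
                           : PROT_READ|PROT_WRITE;
    void* res = mmap(_base + _committed, bytes, prot,
                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
    if (res == MAP_FAILED) return false;
    _committed += bytes;
    return true;
  }
};

int main() {
  GrowableSpace code_space;
  if (!code_space.initialize(8 << 20, /*executable*/ true)) return 1;
  return code_space.expand_by(1 << 20) ? 0 : 1;   // committed RWX
}
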
--- a/hotspot/src/share/vm/runtime/virtualspace.hpp	Mon Mar 23 10:42:20 2009 -0400
+++ b/hotspot/src/share/vm/runtime/virtualspace.hpp	Wed Mar 25 14:19:20 2009 -0400
@@ -32,12 +32,15 @@
   size_t _noaccess_prefix;
   size_t _alignment;
   bool   _special;
+  bool   _executable;
 
   // ReservedSpace
-  ReservedSpace(char* base, size_t size, size_t alignment, bool special);
+  ReservedSpace(char* base, size_t size, size_t alignment, bool special,
+                bool executable);
   void initialize(size_t size, size_t alignment, bool large,
                   char* requested_address,
-                  const size_t noaccess_prefix);
+                  const size_t noaccess_prefix,
+                  bool executable);
 
   // Release parts of an already-reserved memory region [addr, addr + len) to
   // get a new region that has "compound alignment."  Return the start of the
@@ -75,16 +78,16 @@
                 const size_t suffix_size, const size_t suffix_align,
                 char* requested_address,
                 const size_t noaccess_prefix = 0);
+  ReservedSpace(size_t size, size_t alignment, bool large, bool executable);
 
   // Accessors
-  char*  base()      const { return _base;      }
-  size_t size()      const { return _size;      }
-  size_t alignment() const { return _alignment; }
-  bool   special()   const { return _special;   }
-
-  size_t noaccess_prefix()   const { return _noaccess_prefix;   }
-
-  bool is_reserved() const { return _base != NULL; }
+  char*  base()            const { return _base;      }
+  size_t size()            const { return _size;      }
+  size_t alignment()       const { return _alignment; }
+  bool   special()         const { return _special;   }
+  bool   executable()      const { return _executable;   }
+  size_t noaccess_prefix() const { return _noaccess_prefix;   }
+  bool is_reserved()       const { return _base != NULL; }
   void release();
 
   // Splitting
@@ -126,6 +129,13 @@
                     char* requested_address);
 };
 
+// Class encapsulating behavior specific to memory reserved for code (the code cache)
+class ReservedCodeSpace : public ReservedSpace {
+ public:
+  // Constructor
+  ReservedCodeSpace(size_t r_size, size_t rs_align, bool large);
+};
+
 // VirtualSpace is data structure for committing a previously reserved address range in smaller chunks.
 
 class VirtualSpace VALUE_OBJ_CLASS_SPEC {
@@ -143,6 +153,9 @@
   // os::commit_memory() or os::uncommit_memory().
   bool _special;
 
+  // Need to know if commit should be executable.
+  bool   _executable;
+
   // MPSS Support
   // Each virtualspace region has a lower, middle, and upper region.
   // Each region has an end boundary and a high pointer which is the