src/hotspot/share/code/codeCache.cpp
changeset 52870 a76b7884b59a
parent 52815 10bb941d7fd4
child 52908 449a2ce77784
--- a/src/hotspot/share/code/codeCache.cpp	Thu Dec 06 12:39:28 2018 +0530
+++ b/src/hotspot/share/code/codeCache.cpp	Thu Dec 06 10:07:54 2018 +0100
@@ -289,7 +289,7 @@
 
   // If large page support is enabled, align code heaps according to large
   // page size to make sure that code cache is covered by large pages.
-  const size_t alignment = MAX2(page_size(false), (size_t) os::vm_allocation_granularity());
+  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
   non_nmethod_size = align_up(non_nmethod_size, alignment);
   profiled_size    = align_down(profiled_size, alignment);
 
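
The alignment computed in this hunk is the larger of the code cache's page size and the platform allocation granularity; non_nmethod_size is then rounded up (so that heap never drops below its requested size) while profiled_size is rounded down (so the heaps together still fit in the reserved cache). A minimal sketch of that arithmetic, using hypothetical stand-ins for HotSpot's align_up()/align_down() from utilities/align.hpp and made-up values for the large page size and allocation granularity:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-ins for HotSpot's align_up()/align_down();
    // both assume a power-of-two alignment, as the real helpers do.
    static size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    static size_t align_down(size_t size, size_t alignment) {
      return size & ~(alignment - 1);
    }

    int main() {
      const size_t large_page  = 2 * 1024 * 1024; // assumed 2M large page
      const size_t granularity = 64 * 1024;       // assumed 64K vm_allocation_granularity
      const size_t alignment   = large_page > granularity ? large_page : granularity;

      size_t non_nmethod_size = 5 * 1024 * 1024 + 4096;   // example heap sizes
      size_t profiled_size    = 117 * 1024 * 1024 + 4096;

      // Rounding the non-nmethod heap up guarantees its minimum size;
      // rounding the profiled heap down compensates, so the heaps still
      // fit inside the reserved code cache.
      printf("non_nmethod: %zu -> %zu\n", non_nmethod_size,
             align_up(non_nmethod_size, alignment));
      printf("profiled:    %zu -> %zu\n", profiled_size,
             align_down(profiled_size, alignment));
      return 0;
    }
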
@@ -314,10 +314,14 @@
   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 }
 
-size_t CodeCache::page_size(bool aligned) {
+size_t CodeCache::page_size(bool aligned, size_t min_pages) {
   if (os::can_execute_large_page_memory()) {
-    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, 8) :
-                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8);
+    if (InitialCodeCacheSize < ReservedCodeCacheSize) {
+      // Make sure that the page size allows for an incremental commit of the reserved space
+      min_pages = MAX2(min_pages, (size_t)8);
+    }
+    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
+                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
   } else {
     return os::vm_page_size();
   }
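
The new min_pages parameter is the point of the change: as the added comment says, when InitialCodeCacheSize is smaller than ReservedCodeCacheSize the cache is committed incrementally, so the chosen page size must divide the reserved region into at least 8 pages; otherwise a single large page could force committing far more memory than the initial size. A sketch of how such a selection might behave, with page_size_for_region() as a hypothetical model of os::page_size_for_region_unaligned() and made-up page sizes and cache size:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical model of os::page_size_for_region_unaligned(): return
    // the largest supported page size that fits at least min_pages times
    // into the region, falling back to the base page size otherwise.
    static size_t page_size_for_region(size_t region_size, size_t min_pages) {
      const size_t page_sizes[] = { 1024u * 1024 * 1024,  // assumed 1G
                                    2 * 1024 * 1024,      // assumed 2M
                                    4 * 1024 };           // assumed 4K base page
      for (size_t ps : page_sizes) {
        if (region_size / ps >= min_pages) {
          return ps;
        }
      }
      return page_sizes[2];
    }

    int main() {
      const size_t reserved = 1024u * 1024 * 1024; // assume a 1G reserved code cache

      // With min_pages == 8, a 1G cache cannot use 1G pages (only one
      // would fit), so 2M pages are chosen and the cache can be
      // committed incrementally in 2M steps.
      printf("min_pages=8: %zu\n", page_size_for_region(reserved, 8));

      // A caller that commits the whole region up front could accept a
      // single huge page.
      printf("min_pages=1: %zu\n", page_size_for_region(reserved, 1));
      return 0;
    }

This is why the caller in initialize_heaps() above passes 8 explicitly: the heap boundaries it aligns must stay compatible with the page size used for the incremental-commit case.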