8208277: Code cache heap (-XX:ReservedCodeCacheSize) doesn't work with 1GB LargePages
Summary: Use huge pages for code cache if ReservedCodeCacheSize == InitialCodeCacheSize
Reviewed-by: kvn
--- a/src/hotspot/share/code/codeCache.cpp Thu Dec 06 12:39:28 2018 +0530
+++ b/src/hotspot/share/code/codeCache.cpp Thu Dec 06 10:07:54 2018 +0100
@@ -289,7 +289,7 @@
   // If large page support is enabled, align code heaps according to large
   // page size to make sure that code cache is covered by large pages.
-  const size_t alignment = MAX2(page_size(false), (size_t) os::vm_allocation_granularity());
+  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
   non_nmethod_size = align_up(non_nmethod_size, alignment);
   profiled_size   = align_down(profiled_size, alignment);
@@ -314,10 +314,14 @@
   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 }
 
-size_t CodeCache::page_size(bool aligned) {
+size_t CodeCache::page_size(bool aligned, size_t min_pages) {
   if (os::can_execute_large_page_memory()) {
-    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, 8) :
-                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8);
+    if (InitialCodeCacheSize < ReservedCodeCacheSize) {
+      // Make sure that the page size allows for an incremental commit of the reserved space
+      min_pages = MAX2(min_pages, (size_t)8);
+    }
+    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
+                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
   } else {
     return os::vm_page_size();
   }
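
Why min_pages matters: os::page_size_for_region_unaligned(region_size, min_pages) picks the largest supported page size that still fits at least min_pages pages into the region, so the old hard-coded minimum of 8 meant a 1G code cache could never be backed by 1G huge pages (1G / 1G == 1 < 8). The sketch below is illustrative only, not HotSpot source; page_size_for_region and the page-size list are simplified stand-ins for the os:: helpers and the platform's supported page sizes.

// Illustrative sketch (not HotSpot source): simplified stand-in for
// os::page_size_for_region_unaligned(), assuming 1G, 2M and 4K pages.
#include <cstddef>
#include <cstdio>

static const size_t kPageSizes[] = { 1024UL*1024*1024, 2UL*1024*1024, 4096UL };

// Pick the largest page size that fits at least min_pages pages into the
// region; otherwise fall back to the smallest (base) page size.
static size_t page_size_for_region(size_t region_size, size_t min_pages) {
  for (size_t p : kPageSizes) {
    if (region_size / p >= min_pages) {
      return p;
    }
  }
  return kPageSizes[sizeof(kPageSizes) / sizeof(kPageSizes[0]) - 1];
}

int main() {
  const size_t G = 1024UL*1024*1024;
  // Old behavior: min_pages was always 8, so a 1G cache fell back to 2M pages.
  printf("min_pages=8: %zu\n", page_size_for_region(G, 8)); // 2097152 (2M)
  // New behavior: a fully committed cache keeps min_pages == 1, so a single
  // 1G huge page can back it.
  printf("min_pages=1: %zu\n", page_size_for_region(G, 1)); // 1073741824 (1G)
  return 0;
}

In practice, a configuration such as -XX:+UseLargePages -XX:ReservedCodeCacheSize=1g -XX:InitialCodeCacheSize=1g can now place the whole code cache on one 1G page; only when InitialCodeCacheSize < ReservedCodeCacheSize does the incremental-commit requirement raise the minimum back to 8 pages.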
--- a/src/hotspot/share/code/codeCache.hpp Thu Dec 06 12:39:28 2018 +0530
+++ b/src/hotspot/share/code/codeCache.hpp Thu Dec 06 10:07:54 2018 +0100
@@ -111,7 +111,6 @@
   static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
   // Returns the name of the VM option to set the size of the corresponding CodeHeap
   static const char* get_code_heap_flag_name(int code_blob_type);
-  static size_t page_size(bool aligned = true); // Returns the page size used by the CodeCache
   static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
 
   // Iteration
@@ -133,6 +132,7 @@
  public:
   // Initialization
   static void initialize();
+  static size_t page_size(bool aligned = true, size_t min_pages = 1); // Returns the page size used by the CodeCache
 
   static int code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs);
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp Thu Dec 06 12:39:28 2018 +0530
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp Thu Dec 06 10:07:54 2018 +0100
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "code/codeCache.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
 #include "compiler/compilerDefinitions.hpp"
@@ -200,8 +201,10 @@
     FLAG_SET_ERGO(uintx, ReservedCodeCacheSize,
                   MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
   }
-  // Enable SegmentedCodeCache if TieredCompilation is enabled and ReservedCodeCacheSize >= 240M
-  if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M) {
+  // Enable SegmentedCodeCache if TieredCompilation is enabled, ReservedCodeCacheSize >= 240M,
+  // and the code cache contains at least 8 pages (segmentation negates the advantage of huge pages).
+  if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M &&
+      8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
     FLAG_SET_ERGO(bool, SegmentedCodeCache, true);
   }
   if (!UseInterpreter) { // -Xcomp
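
A minimal sketch of the new ergonomic check, assuming 1G huge pages were selected for the code cache; code_cache_page_size and enable_segmented_code_cache are hypothetical stand-ins for CodeCache::page_size() and the FLAG_IS_DEFAULT(SegmentedCodeCache) logic above (which additionally runs only under TieredCompilation).

// Illustrative sketch (not HotSpot source) of the segmentation guard.
#include <cstddef>
#include <cstdio>

// Stand-in for CodeCache::page_size(); assume 1G huge pages are in use.
static size_t code_cache_page_size() { return 1024UL*1024*1024; }

// SegmentedCodeCache splits the cache into separate page-aligned heaps, so
// it is only worthwhile when the cache spans at least 8 pages; with fewer,
// segmentation would forfeit huge-page coverage of the code cache.
static bool enable_segmented_code_cache(size_t reserved_code_cache_size) {
  const size_t M = 1024UL*1024;
  return reserved_code_cache_size >= 240*M &&
         8 * code_cache_page_size() <= reserved_code_cache_size;
}

int main() {
  const size_t G = 1024UL*1024*1024;
  printf("1G cache: %d\n", enable_segmented_code_cache(1*G)); // 0: stay unsegmented, keep the 1G page
  printf("8G cache: %d\n", enable_segmented_code_cache(8*G)); // 1: enough pages to segment
  return 0;
}

The 240M threshold itself is unchanged; the patch only adds the page-count guard so that enabling segmentation by default never trades huge-page coverage away.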