--- a/hotspot/src/share/vm/code/codeCache.cpp Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/src/share/vm/code/codeCache.cpp Mon Jan 19 15:52:56 2015 +0100
@@ -233,8 +233,8 @@
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
// Determine alignment
const size_t page_size = os::can_execute_large_page_memory() ?
- MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
- os::page_size_for_region(size, 8)) :
+ MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
+ os::page_size_for_region_aligned(size, 8)) :
os::vm_page_size();
const size_t granularity = os::vm_allocation_granularity();
const size_t r_align = MAX2(page_size, granularity);
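
The MIN2 above exists because the code cache commits only InitialCodeCacheSize up front while reserving the full size, so both regions constrain the usable page size. A minimal standalone sketch of that interaction, under assumed 2M/4K page sizes and hypothetical 2M initial / 48M reserved cache sizes (page_sizes and the 4096 fallback stand in for real os state):

    #include <cstddef>
    #include <cstdio>

    // Assumed page sizes, largest first, zero-terminated (stands in for os::_page_sizes).
    static const size_t page_sizes[] = { 2u * 1024 * 1024, 4096, 0 };

    // Largest page size that fits min_pages times into region_size and also
    // divides it exactly -- the "aligned" variant used in the hunk above.
    static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages) {
      const size_t max_page_size = region_size / min_pages;
      for (int i = 0; page_sizes[i] != 0; ++i) {
        if (page_sizes[i] <= max_page_size && region_size % page_sizes[i] == 0) {
          return page_sizes[i];
        }
      }
      return 4096;  // fall back to the small page size
    }

    int main() {
      const size_t initial  = 2u * 1024 * 1024;   // stand-in for InitialCodeCacheSize
      const size_t reserved = 48u * 1024 * 1024;  // stand-in for the full reservation
      const size_t a = page_size_for_region_aligned(initial, 8);
      const size_t b = page_size_for_region_aligned(reserved, 8);
      // MIN2: the initially committed part must be mappable too, so the
      // smaller of the two page sizes wins.
      printf("page size = %zu\n", a < b ? a : b);  // 4096, limited by 'initial'
      return 0;
    }

With these numbers the 2M initial size only admits 4K pages at min_pages == 8, so the 48M reservation cannot use 2M pages either.
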
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Mon Jan 19 15:52:56 2015 +0100
@@ -162,8 +162,8 @@
"we should have already filtered out humongous regions");
assert(_end == orig_end(),
"we should have already filtered out humongous regions");
-
- _in_collection_set = false;
+ assert(!_in_collection_set,
+ err_msg("Should not clear heap region %u in the collection set", hrm_index()));
set_allocation_context(AllocationContext::system());
set_young_index_in_cset(-1);
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Mon Jan 19 15:52:56 2015 +0100
@@ -1194,8 +1194,10 @@
return real_forwardee(old);
}
- new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
- old, m, sz);
+ if (!_promotion_failed) {
+ new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
+ old, m, sz);
+ }
if (new_obj == NULL) {
// promotion failed, forward to self
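
Guarding par_promote with !_promotion_failed short-circuits further promotion attempts once one has failed: every later call would fail too, so workers can fall straight through to the forward-to-self path below. A minimal sketch of the pattern, using hypothetical stand-ins (promotion_failed, par_promote, try_promote) rather than the real parNew machinery:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-ins for the promotion path; the names are not the real ones.
    static std::atomic<bool> promotion_failed(false);

    static void* par_promote(size_t size) {
      (void)size;
      return nullptr;  // pretend the old generation is exhausted
    }

    static void* try_promote(size_t size) {
      void* new_obj = nullptr;
      if (!promotion_failed.load(std::memory_order_relaxed)) {
        new_obj = par_promote(size);  // only the first failures pay for this call
      }
      if (new_obj == nullptr) {
        // Promotion failed: remember it so later attempts skip the allocator
        // and go straight to the forward-to-self handling.
        promotion_failed.store(true, std::memory_order_relaxed);
      }
      return new_obj;
    }

    int main() {
      try_promote(64);  // hits the allocator and fails
      try_promote(64);  // short-circuits on the flag
      printf("promotion_failed = %d\n", (int)promotion_failed.load());
      return 0;
    }
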
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp Mon Jan 19 15:52:56 2015 +0100
@@ -61,9 +61,9 @@
void GenerationSizer::initialize_size_info() {
trace_gen_sizes("ps heap raw");
- const size_t max_page_sz = os::page_size_for_region(_max_heap_byte_size, 8);
+ const size_t max_page_sz = os::page_size_for_region_aligned(_max_heap_byte_size, 8);
const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
- const size_t min_page_sz = os::page_size_for_region(_min_heap_byte_size, min_pages);
+ const size_t min_page_sz = os::page_size_for_region_aligned(_min_heap_byte_size, min_pages);
const size_t page_sz = MIN2(max_page_sz, min_page_sz);
// Can a page size be something else than a power of two?
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Mon Jan 19 15:52:56 2015 +0100
@@ -41,7 +41,7 @@
const size_t words = bits / BitsPerWord;
const size_t raw_bytes = words * sizeof(idx_t);
- const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
+ const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
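
This hunk, and the identical psParallelCompact one below, round the reservation up to the larger of the chosen page size and the allocation granularity, so the mapping can actually be backed by such pages. A worked sketch of the arithmetic, assuming a 2M page, a 64K granularity, and an arbitrary raw size (align_size_up here is a local power-of-two helper standing in for the VM's):

    #include <cstddef>
    #include <cstdio>

    // Local power-of-two round-up helper; a stand-in for the VM's align_size_up.
    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t page_sz     = 2u * 1024 * 1024;  // assumed large page
      const size_t granularity = 64u * 1024;        // assumed allocation granularity
      const size_t raw_bytes   = 3u * 1024 * 1024 + 123;
      const size_t alignment   = page_sz > granularity ? page_sz : granularity;  // MAX2
      printf("reserved = %zu bytes\n", align_size_up(raw_bytes, alignment));     // 4194304
      return 0;
    }
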
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Mon Jan 19 15:52:56 2015 +0100
@@ -403,7 +403,7 @@
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
const size_t raw_bytes = count * element_size;
- const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
+ const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
const size_t granularity = os::vm_allocation_granularity();
_reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
--- a/hotspot/src/share/vm/memory/heap.cpp Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/src/share/vm/memory/heap.cpp Mon Jan 19 15:52:56 2015 +0100
@@ -104,8 +104,8 @@
size_t page_size = os::vm_page_size();
if (os::can_execute_large_page_memory()) {
const size_t min_pages = 8;
- page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
- os::page_size_for_region(rs.size(), min_pages));
+ page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
+ os::page_size_for_region_aligned(rs.size(), min_pages));
}
const size_t granularity = os::vm_allocation_granularity();
--- a/hotspot/src/share/vm/runtime/os.cpp Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/src/share/vm/runtime/os.cpp Mon Jan 19 15:52:56 2015 +0100
@@ -1401,15 +1401,17 @@
return (sp > (stack_limit + reserved_area));
}
-size_t os::page_size_for_region(size_t region_size, size_t min_pages) {
+size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
assert(min_pages > 0, "sanity");
if (UseLargePages) {
const size_t max_page_size = region_size / min_pages;
for (size_t i = 0; _page_sizes[i] != 0; ++i) {
const size_t page_size = _page_sizes[i];
- if (page_size <= max_page_size && is_size_aligned(region_size, page_size)) {
- return page_size;
+ if (page_size <= max_page_size) {
+ if (!must_be_aligned || is_size_aligned(region_size, page_size)) {
+ return page_size;
+ }
}
}
}
@@ -1417,6 +1419,14 @@
return vm_page_size();
}
+size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) {
+ return page_size_for_region(region_size, min_pages, true);
+}
+
+size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) {
+ return page_size_for_region(region_size, min_pages, false);
+}
+
#ifndef PRODUCT
void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count)
{
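
This split is the core of the change: the aligned variant keeps the old contract (the returned page size divides region_size exactly), while the unaligned variant only promises that region_size / min_pages is at least the returned value. A self-contained sketch of the selection loop under assumed 2M/4K page sizes (page_sizes, is_size_aligned, and the 4096 fallback are stand-ins for os state):

    #include <cstddef>
    #include <cstdio>

    // Assumed page sizes, largest first, zero-terminated (stands in for os::_page_sizes).
    static const size_t page_sizes[] = { 2u * 1024 * 1024, 4096, 0 };

    static bool is_size_aligned(size_t size, size_t alignment) {
      return size % alignment == 0;
    }

    // Mirror of the selection loop above: the largest page size that fits
    // min_pages times into region_size and, when must_be_aligned is set,
    // also divides region_size exactly; otherwise the small page size.
    static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
      const size_t max_page_size = region_size / min_pages;
      for (int i = 0; page_sizes[i] != 0; ++i) {
        const size_t page_size = page_sizes[i];
        if (page_size <= max_page_size &&
            (!must_be_aligned || is_size_aligned(region_size, page_size))) {
          return page_size;
        }
      }
      return 4096;  // stand-in for vm_page_size()
    }

    int main() {
      const size_t region = 2u * 1024 * 1024 + 17;  // not large-page aligned
      printf("aligned:   %zu\n", page_size_for_region(region, 1, true));   // 4096
      printf("unaligned: %zu\n", page_size_for_region(region, 1, false));  // 2097152
      return 0;
    }

For a region of 2M + 17 bytes, the aligned variant has to fall back to 4K pages, while the unaligned variant can still return the 2M page.
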
@@ -1665,17 +1675,17 @@
static size_t large_page_size() {
const size_t large_page_size_example = 4 * M;
- return os::page_size_for_region(large_page_size_example, 1);
+ return os::page_size_for_region_aligned(large_page_size_example, 1);
}
- static void test_page_size_for_region() {
+ static void test_page_size_for_region_aligned() {
if (UseLargePages) {
const size_t small_page = small_page_size();
const size_t large_page = large_page_size();
if (large_page > small_page) {
size_t num_small_pages_in_large = large_page / small_page;
- size_t page = os::page_size_for_region(large_page, num_small_pages_in_large);
+ size_t page = os::page_size_for_region_aligned(large_page, num_small_pages_in_large);
assert_eq(page, small_page);
}
@@ -1688,21 +1698,53 @@
const size_t large_page = large_page_size();
if (large_page > small_page) {
const size_t unaligned_region = large_page + 17;
- size_t page = os::page_size_for_region(unaligned_region, 1);
+ size_t page = os::page_size_for_region_aligned(unaligned_region, 1);
assert_eq(page, small_page);
const size_t num_pages = 5;
const size_t aligned_region = large_page * num_pages;
- page = os::page_size_for_region(aligned_region, num_pages);
+ page = os::page_size_for_region_aligned(aligned_region, num_pages);
assert_eq(page, large_page);
}
}
}
+ static void test_page_size_for_region_unaligned() {
+ if (UseLargePages) {
+ // Given an exact page size, should return that page size.
+ for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
+ size_t expected = os::_page_sizes[i];
+ size_t actual = os::page_size_for_region_unaligned(expected, 1);
+ assert_eq(expected, actual);
+ }
+
+ // Given a size slightly larger than a page size, return that page size.
+ for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
+ size_t expected = os::_page_sizes[i];
+ size_t actual = os::page_size_for_region_unaligned(expected + 17, 1);
+ assert_eq(expected, actual);
+ }
+
+ // Given a slightly smaller size than a page size,
+ // return the next smaller page size.
+ if (os::_page_sizes[1] > os::_page_sizes[0]) {
+ size_t expected = os::_page_sizes[0];
+ size_t actual = os::page_size_for_region_unaligned(os::_page_sizes[1] - 17, 1);
+ assert_eq(actual, expected);
+ }
+
+ // Return small page size for values less than a small page.
+ size_t small_page = small_page_size();
+ size_t actual = os::page_size_for_region_unaligned(small_page - 17, 1);
+ assert_eq(small_page, actual);
+ }
+ }
+
public:
static void run_tests() {
- test_page_size_for_region();
+ test_page_size_for_region_aligned();
test_page_size_for_region_alignment();
+ test_page_size_for_region_unaligned();
}
};
--- a/hotspot/src/share/vm/runtime/os.hpp Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/src/share/vm/runtime/os.hpp Mon Jan 19 15:52:56 2015 +0100
@@ -148,6 +148,7 @@
static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
+ static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);
public:
static void init(void); // Called before command line parsing
@@ -267,8 +268,13 @@
// Returns the page size to use for a region of memory.
// region_size / min_pages will always be greater than or equal to the
- // returned value.
- static size_t page_size_for_region(size_t region_size, size_t min_pages);
+ // returned value. The returned value will divide region_size.
+ static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);
+
+ // Returns the page size to use for a region of memory.
+ // region_size / min_pages will always be greater than or equal to the
+ // returned value. The returned value might not divide region_size.
+ static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);
// Return the largest page size that can be used
static size_t max_page_size() {
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp Mon Jan 19 15:52:56 2015 +0100
@@ -38,7 +38,8 @@
}
ReservedSpace::ReservedSpace(size_t size) {
- size_t page_size = os::page_size_for_region(size, 1);
+ // Want to use large pages where possible and pad with small pages.
+ size_t page_size = os::page_size_for_region_unaligned(size, 1);
bool large_pages = page_size != (size_t)os::vm_page_size();
// Don't force the alignment to be large page aligned,
// since that will waste memory.
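
Unaligned is the right call here because a reservation that is not a multiple of the large page size can still be mostly backed by large pages, with ordinary pages padding the tail. A toy calculation under assumed 2M large and 4K small pages:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t K = 1024, M = K * K;
      const size_t large_page = 2 * M;
      const size_t size = 10 * M + 4 * K;  // deliberately not a 2M multiple
      const size_t large_part = (size / large_page) * large_page;
      printf("large pages: %zu bytes, small-page tail: %zu bytes\n",
             large_part, size - large_part);  // 10485760 and 4096
      return 0;
    }
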
@@ -617,7 +618,7 @@
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
- const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
+ const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}
@@ -1239,7 +1240,7 @@
case Disable:
return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
case Commit:
- return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
+ return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
}
}
--- a/hotspot/test/gc/TestNUMAPageSize.java Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/test/gc/TestNUMAPageSize.java Mon Jan 19 15:52:56 2015 +0100
@@ -25,6 +25,7 @@
* @test TestNUMAPageSize
* @summary Make sure that start up with NUMA support does not cause problems.
* @bug 8061467
+ * @requires (vm.opt.AggressiveOpts == null) | (vm.opt.AggressiveOpts == false)
* @key gc
* @key regression
* @run main/othervm -Xmx8M -XX:+UseNUMA TestNUMAPageSize
--- a/hotspot/test/gc/TestSmallHeap.java Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/test/gc/TestSmallHeap.java Mon Jan 19 15:52:56 2015 +0100
@@ -25,6 +25,7 @@
* @test TestSmallHeap
* @bug 8067438
* @requires vm.gc=="null"
+ * @requires (vm.opt.AggressiveOpts == null) | (vm.opt.AggressiveOpts == false)
* @summary Verify that starting the VM with a small heap works
* @library /testlibrary /../../test/lib
* @build TestSmallHeap
@@ -33,8 +34,9 @@
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseSerialGC TestSmallHeap
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseG1GC TestSmallHeap
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseConcMarkSweepGC TestSmallHeap
- *
- * Note: It would be nice to verify the minimal supported heap size (2m) here,
+ */
+
+/* Note: It would be nice to verify the minimal supported heap size (2m) here,
* but we align the heap size based on the card table size. And the card table
* size is aligned based on the minimal pages size provided by the os. This
* means that on most platforms, where the minimal page size is 4k, we get a
--- a/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java Thu Jan 15 19:23:48 2015 -0800
+++ b/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java Mon Jan 19 15:52:56 2015 +0100
@@ -116,7 +116,14 @@
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
- output.shouldHaveExitValue(0);
+ try {
+ output.shouldHaveExitValue(0);
+ } catch (RuntimeException e) {
+ // It's ok if there is no client vm in the jdk.
+ if (output.firstMatch("Unrecognized option: -client") == null) {
+ throw e;
+ }
+ }
return output;
}