--- a/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp Mon Dec 08 18:57:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp Thu Dec 18 11:23:18 2014 +0000
@@ -131,6 +131,9 @@
_committed.set_range(start, start + size_in_pages);
MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+ if (AlwaysPreTouch) {
+ os::pretouch_memory((char*)result.start(), (char*)result.end());
+ }
return result;
}
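
With this hunk, pages newly committed for the G1 heap are touched right away when -XX:+AlwaysPreTouch is set, so the page faults are paid at commit/expansion time instead of on first access by the collector or the mutator. Outside HotSpot, the effect can be approximated with plain POSIX calls; the sketch below is only an illustration (the helper name commit_and_pretouch is made up, and mmap stands in for HotSpot's reserve/commit machinery):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>

    // Commit an anonymous mapping and touch one byte per page so the OS
    // backs the whole range up front instead of faulting pages in lazily.
    static char* commit_and_pretouch(size_t bytes) {
      char* base = (char*)mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) return nullptr;
      const size_t page = (size_t)sysconf(_SC_PAGESIZE);
      for (volatile char* p = base; p < base + bytes; p += page) {
        *p = 0;   // same idea as the loop in os::pretouch_memory below
      }
      return base;
    }
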
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp Mon Dec 08 18:57:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp Thu Dec 18 11:23:18 2014 +0000
@@ -63,9 +63,7 @@
}
void MutableSpace::pretouch_pages(MemRegion mr) {
- for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
- char t = *p; *p = t;
- }
+ os::pretouch_memory((char*)mr.start(), (char*)mr.end());
}
void MutableSpace::initialize(MemRegion mr,
--- a/hotspot/src/share/vm/runtime/os.cpp Mon Dec 08 18:57:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/os.cpp Thu Dec 18 11:23:18 2014 +0000
@@ -1588,6 +1588,11 @@
return res;
}
+void os::pretouch_memory(char* start, char* end) {
+ for (volatile char *p = start; p < end; p += os::vm_page_size()) {
+ *p = 0;
+ }
+}
char* os::map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
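
os::pretouch_memory is a straight consolidation of the per-caller loops removed elsewhere in this changeset: one volatile write per page over [start, end). On Linux, the effect of such a loop can be observed with mincore(2); the check below is a standalone sketch for experimentation, not part of the patch:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <vector>

    // Count the pages of [addr, addr + bytes) that are resident in RAM.
    // Linux-specific; addr must be page aligned (e.g. returned by mmap).
    static size_t resident_pages(char* addr, size_t bytes) {
      const size_t page = (size_t)sysconf(_SC_PAGESIZE);
      std::vector<unsigned char> vec((bytes + page - 1) / page);
      if (mincore(addr, bytes, vec.data()) != 0) return 0;
      size_t n = 0;
      for (unsigned char v : vec) n += (v & 1);
      return n;
    }

A freshly mmap'ed anonymous region typically reports close to zero resident pages; after running a pretouch loop over it, every page should be resident.
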
--- a/hotspot/src/share/vm/runtime/os.hpp Mon Dec 08 18:57:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/os.hpp Thu Dec 18 11:23:18 2014 +0000
@@ -311,6 +311,12 @@
static bool uncommit_memory(char* addr, size_t bytes);
static bool release_memory(char* addr, size_t bytes);
+ // Touch the memory pages that cover the range from start to end (exclusive)
+ // so that the OS backs the whole range with physical memory.
+ // Note that the current implementation may not touch the last page if
+ // unaligned addresses are passed in.
+ static void pretouch_memory(char* start, char* end);
+
enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
static bool protect_memory(char* addr, size_t bytes, ProtType prot,
bool is_committed = true);
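
The caveat in the new comment is real: stepping in vm_page_size() increments from an unaligned start can step over the page containing end - 1. With 4 KiB pages, start = base + 4097 and end = base + 8193, the loop touches base + 4097 and then stops, so the page holding base + 8192 is never touched. Where full coverage matters, a variant that advances from page boundary to page boundary covers every page; the sketch below is an illustration of the caveat only, not something the patch does, and it assumes a power-of-two page size:

    #include <cstdint>
    #include <cstddef>

    // Touch every page that intersects [start, end), even for unaligned
    // start/end, without ever writing outside the range: after each touch,
    // jump to the first byte of the next page instead of start + k * page.
    static void pretouch_every_page(char* start, char* end, size_t page_size) {
      volatile char* p = start;
      while (p < end) {
        *p = 0;
        uintptr_t next = ((uintptr_t)p + page_size) & ~(uintptr_t)(page_size - 1);
        p = (volatile char*)next;
      }
    }
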
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp Mon Dec 08 18:57:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp Thu Dec 18 11:23:18 2014 +0000
@@ -615,19 +615,7 @@
}
if (pre_touch || AlwaysPreTouch) {
- int vm_ps = os::vm_page_size();
- for (char* curr = previous_high;
- curr < unaligned_new_high;
- curr += vm_ps) {
- // Note the use of a write here; originally we tried just a read, but
- // since the value read was unused, the optimizer removed the read.
- // If we ever have a concurrent touchahead thread, we'll want to use
- // a read, to avoid the potential of overwriting data (if a mutator
- // thread beats the touchahead thread to a page). There are various
- // ways of making sure this read is not optimized away: for example,
- // generating the code for a read procedure at runtime.
- *curr = 0;
- }
+ os::pretouch_memory(previous_high, unaligned_new_high);
}
_high += bytes;
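
The block comment removed here is the rationale behind the volatile write now living in os::pretouch_memory: a plain read whose result is unused is a dead load that the optimizer may delete, and with it the page fault the loop exists to trigger, whereas an access through a volatile lvalue must be emitted. A minimal standalone contrast (names made up, not HotSpot code):

    #include <cstddef>

    // May compile to nothing: the loaded value is unused, so the compiler
    // is free to drop the load -- and the page is never faulted in.
    static void pretouch_by_plain_read(char* start, char* end, size_t page) {
      for (char* p = start; p < end; p += page) {
        char t = *p;
        (void)t;
      }
    }

    // Cannot be elided: volatile accesses are observable behaviour, so every
    // iteration performs the store and faults its page in.
    static void pretouch_by_volatile_write(char* start, char* end, size_t page) {
      for (volatile char* p = start; p < end; p += page) {
        *p = 0;
      }
    }

As the removed comment also notes, a future concurrent touch-ahead thread would have to switch back to (non-elidable) reads to avoid overwriting data written by a mutator that reaches the page first.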