--- a/hotspot/src/share/vm/opto/compile.cpp Thu Sep 11 11:04:55 2008 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp Fri Sep 12 07:04:34 2008 -0700
@@ -2082,7 +2082,7 @@
in2 = n->in(2)->in(1);
} else if ( n->in(2)->Opcode() == Op_ConP ) {
const Type* t = n->in(2)->bottom_type();
- if (t == TypePtr::NULL_PTR) {
+ if (t == TypePtr::NULL_PTR && UseImplicitNullCheckForNarrowOop) {
Node *in1 = n->in(1);
if (Matcher::clone_shift_expressions) {
// x86, ARM and friends can handle 2 adds in addressing mode.
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Sep 11 11:04:55 2008 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Sep 12 07:04:34 2008 -0700
@@ -1204,15 +1204,17 @@
// Turn off until bug is fixed.
// FLAG_SET_ERGO(bool, UseCompressedOops, true);
}
+#ifdef _WIN64
+ if (UseLargePages && UseCompressedOops) {
+ // Cannot allocate guard pages for implicit checks in indexed addressing
+ // mode, when large pages are specified on Windows.
+ FLAG_SET_DEFAULT(UseImplicitNullCheckForNarrowOop, false);
+ }
+#endif // _WIN64
} else {
if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
// If specified, give a warning
- if (UseConcMarkSweepGC){
- warning("Compressed Oops does not work with CMS");
- } else {
- warning(
- "Max heap size too large for Compressed Oops");
- }
+ warning( "Max heap size too large for Compressed Oops");
FLAG_SET_DEFAULT(UseCompressedOops, false);
}
}
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Sep 11 11:04:55 2008 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Fri Sep 12 07:04:34 2008 -0700
@@ -294,6 +294,9 @@
lp64_product(bool, CheckCompressedOops, trueInDebug, \
"generate checks in encoding/decoding code") \
\
+ product(bool, UseImplicitNullCheckForNarrowOop, true, \
+ "generate implicit null check in indexed addressing mode.") \
+ \
/* UseMembar is theoretically a temp flag used for memory barrier \
* removal testing. It was supposed to be removed before FCS but has \
* been re-added (see 6401008) */ \
--- a/hotspot/src/share/vm/runtime/thread.cpp Thu Sep 11 11:04:55 2008 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp Fri Sep 12 07:04:34 2008 -0700
@@ -2756,13 +2756,17 @@
// For now, just manually iterate through them.
tc->do_thread(VMThread::vm_thread());
Universe::heap()->gc_threads_do(tc);
- {
- // Grab the Terminator_lock to prevent watcher_thread from being terminated.
- MutexLockerEx mu(Terminator_lock, Mutex::_no_safepoint_check_flag);
- WatcherThread *wt = WatcherThread::watcher_thread();
- if (wt != NULL)
- tc->do_thread(wt);
- }
+ WatcherThread *wt = WatcherThread::watcher_thread();
+ // Strictly speaking, the following NULL check isn't sufficient to make sure
+ // the data for WatcherThread is still valid upon being examined. However,
+ // considering that WatcherThread terminates when the VM is on its way to
+ // exit at a safepoint, the chance of that happening is extremely small. The right
+ // way to prevent termination of WatcherThread would be to acquire
+ // Terminator_lock, but we can't do that without violating the lock rank
+ // checking in some cases.
+ if (wt != NULL)
+ tc->do_thread(wt);
+
// If CompilerThreads ever become non-JavaThreads, add them here
}
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp Thu Sep 11 11:04:55 2008 -0700
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp Fri Sep 12 07:04:34 2008 -0700
@@ -380,7 +380,8 @@
bool large, char* requested_address) :
ReservedSpace(size, alignment, large,
requested_address,
- UseCompressedOops ? lcm(os::vm_page_size(), alignment) : 0) {
+ UseCompressedOops && UseImplicitNullCheckForNarrowOop ?
+ lcm(os::vm_page_size(), alignment) : 0) {
// Only reserved space for the java heap should have a noaccess_prefix
// if using compressed oops.
protect_noaccess_prefix(size);
@@ -391,7 +392,8 @@
const size_t suffix_size,
const size_t suffix_align) :
ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
- UseCompressedOops ? lcm(os::vm_page_size(), prefix_align) : 0) {
+ UseCompressedOops && UseImplicitNullCheckForNarrowOop ?
+ lcm(os::vm_page_size(), prefix_align) : 0) {
protect_noaccess_prefix(prefix_size+suffix_size);
}