8139864: Improve handling of stack protection zones.
author goetz
Sun, 20 Dec 2015 10:37:23 -0500
changeset 35201 996db89f378e
parent 35192 76f4de26388d
child 35202 506ccf1717fd
8139864: Improve handling of stack protection zones. Reviewed-by: stuefe, coleenp, fparain
hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp
hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp
hotspot/src/cpu/ppc/vm/globals_ppc.hpp
hotspot/src/cpu/ppc/vm/ppc.ad
hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp
hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp
hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp
hotspot/src/cpu/x86/vm/frame_x86.cpp
hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp
hotspot/src/cpu/zero/vm/stack_zero.hpp
hotspot/src/cpu/zero/vm/stack_zero.inline.hpp
hotspot/src/os/aix/vm/os_aix.cpp
hotspot/src/os/bsd/vm/os_bsd.cpp
hotspot/src/os/linux/vm/os_linux.cpp
hotspot/src/os/solaris/vm/os_solaris.cpp
hotspot/src/os/windows/vm/os_windows.cpp
hotspot/src/os/windows/vm/os_windows.inline.hpp
hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp
hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp
hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp
hotspot/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp
hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp
hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp
hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp
hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp
hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp
hotspot/src/share/vm/asm/assembler.cpp
hotspot/src/share/vm/interpreter/interpreter.cpp
hotspot/src/share/vm/prims/whitebox.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/os.cpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/thread.hpp
hotspot/src/share/vm/runtime/thread.inline.hpp
hotspot/src/share/vm/shark/sharkStack.cpp
--- a/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -58,7 +58,8 @@
   address   unextended_sp = (address)_unextended_sp;
 
   // consider stack guards when trying to determine "safe" stack pointers
-  static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
+  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
+    (JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size()) : 0;
   size_t usable_stack_size = thread->stack_size() - stack_guard_size;
 
   // sp must be within the usable part of the stack (not in guards)
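
A minimal standalone sketch of the check this hunk feeds, with hypothetical
constants standing in for the JavaThread accessors and
os::uses_stack_guard_pages() (the real zone sizes come from the
StackRedPages/StackYellowPages flags, aligned up to the OS page size):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-ins for JavaThread::stack_red_zone_size() and
    // JavaThread::stack_yellow_zone_size().
    const size_t kRedZoneSize    = 4096;
    const size_t kYellowZoneSize = 2 * 4096;

    // sp is only "safe" if it lies in the usable part of a downward-growing
    // stack, i.e. above the red/yellow guard zones at the low end.
    bool sp_is_safe(uintptr_t stack_base, size_t stack_size, uintptr_t sp,
                    bool uses_stack_guard_pages) {
      size_t guard_size  = uses_stack_guard_pages ? kRedZoneSize + kYellowZoneSize
                                                  : 0;
      size_t usable_size = stack_size - guard_size;
      return sp < stack_base && sp >= stack_base - usable_size;
    }
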
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -3938,7 +3938,7 @@
   // was post-decremented.)  Skip this address by starting at i=1, and
   // touch a few more pages below.  N.B.  It is important to touch all
   // the way down including all pages in the shadow zone.
-  for (int i = 0; i< StackShadowPages-1; i++) {
+  for (int i = 0; i < (int)(JavaThread::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
     // this could be any sized move but this can be a debugging crumb
     // so the bigger the better.
     lea(tmp, Address(tmp, -os::vm_page_size()));
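
A compilable sketch of the banging loop above, writing through a plain buffer
instead of emitting stores via the macro assembler; kPageSize and
kShadowZoneSize are made-up stand-ins for os::vm_page_size() and
JavaThread::stack_shadow_zone_size():

    #include <cstddef>
    #include <vector>

    const int    kPageSize       = 4096;
    const size_t kShadowZoneSize = 6 * (size_t)kPageSize;

    // Touch one word per shadow page, top-down, so every page is committed
    // before a frame larger than a page is pushed. 'top' is the high end of
    // the region to bang.
    void bang_shadow_pages(volatile char* top) {
      for (int i = 1; i < (int)(kShadowZoneSize / kPageSize); i++) {
        top[-i * kPageSize] = 0;  // any sized store works; it only needs to fault
      }
    }

    int main() {
      std::vector<char> stack(kShadowZoneSize);
      bang_shadow_pages((volatile char*)(stack.data() + stack.size()));
      return 0;
    }
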
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -1542,7 +1542,7 @@
 
   // Generate stack overflow check
   if (UseStackBanging) {
-    __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
+    __ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
   } else {
     Unimplemented();
   }
@@ -1949,7 +1949,7 @@
   Label reguard;
   Label reguard_done;
   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
-  __ cmpw(rscratch1, JavaThread::stack_guard_yellow_disabled);
+  __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
   __ br(Assembler::EQ, reguard);
   __ bind(reguard_done);
 
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -474,12 +474,12 @@
   __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
   __ add(r0, r0, rscratch1);
 
-  // Use the maximum number of pages we might bang.
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
-                                                                              (StackRedPages+StackYellowPages);
+  // Use the bigger size for banging.
+  const int max_bang_size = (int)MAX2(JavaThread::stack_shadow_zone_size(),
+                                      JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size());
 
   // add in the red and yellow zone sizes
-  __ add(r0, r0, max_pages * page_size * 2);
+  __ add(r0, r0, max_bang_size * 2);
 
   // check against the current stack bottom
   __ cmp(sp, r0);
@@ -826,9 +826,10 @@
   // an interpreter frame with greater than a page of locals, so each page
   // needs to be checked.  Only true for non-native.
   if (UseStackBanging) {
-    const int start_page = native_call ? StackShadowPages : 1;
+    const int n_shadow_pages = (int)(JavaThread::stack_shadow_zone_size() / os::vm_page_size());
+    const int start_page = native_call ? n_shadow_pages : 1;
     const int page_size = os::vm_page_size();
-    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
+    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
       __ sub(rscratch2, sp, pages*page_size);
       __ str(zr, Address(rscratch2));
     }
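
The entry-point check above (like its x86 counterpart later in this change)
bangs over whichever region is larger. A sketch of that choice with std::max
in place of HotSpot's MAX2 and invented zone sizes:

    #include <algorithm>
    #include <cstddef>

    // Invented zone sizes for illustration.
    const size_t kShadowZone = 6 * 4096;
    const size_t kRedZone    = 1 * 4096;
    const size_t kYellowZone = 2 * 4096;

    // The bang distance must cover the shadow zone, but never less than the
    // red+yellow guard zones an overflow has to hit.
    size_t max_bang_size() {
      return std::max(kShadowZone, kRedZone + kYellowZone);
    }
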
--- a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Sun Dec 20 10:37:23 2015 -0500
@@ -46,9 +46,9 @@
 #define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
 #define DEFAULT_STACK_RESERVED_PAGES (0)
 
-#define MIN_STACK_YELLOW_PAGES (1)
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
-#define MIN_STACK_SHADOW_PAGES (1)
+#define MIN_STACK_SHADOW_PAGES (3 DEBUG_ONLY(+1))
 #define MIN_STACK_RESERVED_PAGES (0)
 
 define_pd_global(intx, StackYellowPages,      DEFAULT_STACK_YELLOW_PAGES);
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Sun Dec 20 10:37:23 2015 -0500
@@ -1308,7 +1308,7 @@
     // insert the code of generate_stack_overflow_check(), see
     // assembler.cpp for some illuminative comments.
     const int page_size = os::vm_page_size();
-    int bang_end = StackShadowPages * page_size;
+    int bang_end = JavaThread::stack_shadow_zone_size();
 
     // This is how far the previous frame's stack banging extended.
     const int bang_end_safe = bang_end;
--- a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -2388,7 +2388,7 @@
 
   Label no_reguard;
   __ lwz(r_temp_1, thread_(stack_guard_state));
-  __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_disabled);
+  __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_reserved_disabled);
   __ bne(CCR0, no_reguard);
 
   save_native_result(masm, ret_type, workspace_slot_offset);
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -3595,7 +3595,7 @@
   // was post-decremented.)  Skip this address by starting at i=1, and
   // touch a few more pages below.  N.B.  It is important to touch all
   // the way down including all pages in the shadow zone.
-  for (int i = 1; i < StackShadowPages; i++) {
+  for (int i = 1; i < (int)(JavaThread::stack_shadow_zone_size() / os::vm_page_size()); i++) {
     set((-i*offset)+STACK_BIAS, Rscratch);
     st(G0, Rtsp, Rscratch);
   }
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -2643,7 +2643,7 @@
 
   Label no_reguard;
   __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
-  __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
+  __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_reserved_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
 
     save_native_result(masm, ret_type, stack_slots);
   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
@@ -2936,7 +2936,7 @@
   int pad = VerifyThread ? 512 : 0;// Extra slop space for more verify code
 #ifdef ASSERT
   if (UseStackBanging) {
-    pad += StackShadowPages*16 + 32;
+    pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
   }
 #endif
 #if INCLUDE_JVMCI
@@ -3225,7 +3225,7 @@
   int pad = VerifyThread ? 512 : 0;
 #ifdef ASSERT
   if (UseStackBanging) {
-    pad += StackShadowPages*16 + 32;
+    pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
   }
 #endif
 #ifdef _LP64
--- a/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -437,7 +437,7 @@
 
   // compute the beginning of the protected zone minus the requested frame size
   __ sub( Rscratch, Rscratch2,   Rscratch );
-  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
+  __ set( JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size(), Rscratch2 );
   __ add( Rscratch, Rscratch2,   Rscratch );
 
   // Add in the size of the frame (which is the same as subtracting it from the
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -56,7 +56,8 @@
   address   unextended_sp = (address)_unextended_sp;
 
   // consider stack guards when trying to determine "safe" stack pointers
-  static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
+  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
+    JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size() : 0;
   size_t usable_stack_size = thread->stack_size() - stack_guard_size;
 
   // sp must be within the usable part of the stack (not in guards)
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -1059,8 +1059,8 @@
   // touch it again.  (It was touched as (tmp-pagesize) but then tmp
   // was post-decremented.)  Skip this address by starting at i=1, and
   // touch a few more pages below.  N.B.  It is important to touch all
-  // the way down to and including i=StackShadowPages.
-  for (int i = 1; i < StackShadowPages; i++) {
+  // the way down including all pages in the shadow zone.
+  for (int i = 1; i < ((int)JavaThread::stack_shadow_zone_size() / os::vm_page_size()); i++) {
     // this could be any sized move but this can be a debugging crumb
     // so the bigger the better.
     movptr(Address(tmp, (-i*os::vm_page_size())), size );
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -1776,7 +1776,7 @@
   // Generate stack overflow check
 
   if (UseStackBanging) {
-    __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
+    __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
   } else {
     // need a 5 byte instruction to allow MT safe patching to non-entrant
     __ fat_nop();
@@ -2151,7 +2151,7 @@
 
   Label reguard;
   Label reguard_done;
-  __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
+  __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
   __ jcc(Assembler::equal, reguard);
 
   // slow path reguard  re-enters here
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -2065,7 +2065,7 @@
   // Generate stack overflow check
 
   if (UseStackBanging) {
-    __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
+    __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
   } else {
     // need a 5 byte instruction to allow MT safe patching to non-entrant
     __ fat_nop();
@@ -2499,7 +2499,7 @@
 
   Label reguard;
   Label reguard_done;
-  __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
+  __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
   __ jcc(Assembler::equal, reguard);
   __ bind(reguard_done);
 
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -540,12 +540,12 @@
   __ addptr(rax, stack_base);
   __ subptr(rax, stack_size);
 
-  // Use the maximum number of pages we might bang.
-  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages+StackReservedPages) ? StackShadowPages :
-                        (StackRedPages+StackYellowPages+StackReservedPages);
+  // Use the bigger size for banging.
+  const int max_bang_size = (int)MAX2(JavaThread::stack_shadow_zone_size(),
+                                      JavaThread::stack_guard_zone_size());
 
   // add in the red and yellow zone sizes
-  __ addptr(rax, max_pages * page_size);
+  __ addptr(rax, max_bang_size);
 
   // check against the current stack bottom
   __ cmpptr(rsp, rax);
@@ -1187,7 +1187,7 @@
   {
     Label no_reguard;
     __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
-            JavaThread::stack_guard_yellow_disabled);
+            JavaThread::stack_guard_yellow_reserved_disabled);
     __ jcc(Assembler::notEqual, no_reguard);
 
     __ pusha(); // XXX only save smashed registers
--- a/hotspot/src/cpu/zero/vm/stack_zero.hpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/zero/vm/stack_zero.hpp	Sun Dec 20 10:37:23 2015 -0500
@@ -40,7 +40,7 @@
  public:
   ZeroStack()
     : _base(NULL), _top(NULL), _sp(NULL) {
-    _shadow_pages_size = StackShadowPages * os::vm_page_size();
+    _shadow_pages_size = JavaThread::stack_shadow_zone_size();
   }
 
   bool needs_setup() const {
--- a/hotspot/src/cpu/zero/vm/stack_zero.inline.hpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/cpu/zero/vm/stack_zero.inline.hpp	Sun Dec 20 10:37:23 2015 -0500
@@ -49,10 +49,11 @@
 // value can be negative.
 inline int ZeroStack::abi_stack_available(Thread *thread) const {
   guarantee(Thread::current() == thread, "should run in the same thread");
-  int stack_used = thread->stack_base() - (address) &stack_used
-    + (StackYellowPages+StackRedPages+StackShadowPages) * os::vm_page_size();
-  int stack_free = thread->stack_size() - stack_used;
-  return stack_free;
+  // The address of a local approximates the current stack pointer.
+  address stack_used;
+  assert(thread->stack_size() -
+         (thread->stack_base() - (address) &stack_used +
+          JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size()) ==
+         (address)&stack_used - ((JavaThread*)thread)->stack_overflow_limit(), "sanity");
+  return (address)&stack_used - ((JavaThread*)thread)->stack_overflow_limit();
 }
 
 #endif // CPU_ZERO_VM_STACK_ZERO_INLINE_HPP
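
The assert added above encodes an identity between the old and the new way of
computing the free ABI stack. A standalone check of that identity with made-up
numbers (the stack grows downward):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      uintptr_t stack_base  = 0x40000000;           // high end of the stack
      size_t    stack_size  = 512 * 1024;
      size_t    guard_zone  = 16 * 1024;            // red + yellow + reserved
      size_t    shadow_zone = 24 * 1024;
      uintptr_t sp          = stack_base - 100 * 1024;  // ~ (address)&stack_used

      // stack_overflow_limit() == stack_end() + guard zone + shadow zone
      uintptr_t overflow_limit = (stack_base - stack_size) + guard_zone + shadow_zone;

      // Old computation: total size minus everything used or reserved ...
      size_t stack_free = stack_size - ((stack_base - sp) + guard_zone + shadow_zone);
      // ... equals the new one: distance from sp down to the overflow limit.
      assert(stack_free == sp - overflow_limit);
      return 0;
    }
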
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -845,7 +845,7 @@
   trcVerbose("newborn Thread : pthread-id %u, ktid " UINT64_FORMAT
     ", stack %p ... %p, stacksize 0x%IX (%IB)",
     pthread_id, kernel_thread_id,
-    thread->stack_base() - thread->stack_size(),
+    thread->stack_end(),
     thread->stack_base(),
     thread->stack_size(),
     thread->stack_size());
@@ -1014,7 +1014,7 @@
 
   trcVerbose("attaching Thread : pthread-id %u, ktid " UINT64_FORMAT ", stack %p ... %p, stacksize 0x%IX (%IB)",
     pthread_id, kernel_thread_id,
-    thread->stack_base() - thread->stack_size(),
+    thread->stack_end(),
     thread->stack_base(),
     thread->stack_size(),
     thread->stack_size());
@@ -3570,15 +3570,6 @@
   Aix::_main_thread = pthread_self();
 
   initial_time_count = os::elapsed_counter();
-
-  // If the pagesize of the VM is greater than 8K determine the appropriate
-  // number of initial guard pages. The user can change this with the
-  // command line arguments, if needed.
-  if (vm_page_size() > (int)Aix::vm_default_page_size()) {
-    StackYellowPages = 1;
-    StackRedPages = 1;
-    StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
-  }
 }
 
 // This is called _after_ the global arguments have been parsed.
@@ -3684,8 +3675,9 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
-                     (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
+                                    JavaThread::stack_guard_zone_size() +
+                                    JavaThread::stack_shadow_zone_size() +
+                                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
 
   os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
 
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -3479,8 +3479,9 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
-                                    (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages+
-                                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
+                                    JavaThread::stack_guard_zone_size() +
+                                    JavaThread::stack_shadow_zone_size() +
+                                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -621,7 +621,7 @@
   assert(t->osthread()->expanding_stack(), "expand should be set");
   assert(t->stack_base() != NULL, "stack_base was not initialized");
 
-  if (addr <  t->stack_base() && addr >= t->stack_yellow_zone_base()) {
+  if (addr <  t->stack_base() && addr >= t->stack_reserved_zone_base()) {
     sigset_t mask_all, old_sigset;
     sigfillset(&mask_all);
     pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
@@ -836,7 +836,7 @@
     // is no gap between the last two virtual memory regions.
 
     JavaThread *jt = (JavaThread *)thread;
-    address addr = jt->stack_yellow_zone_base();
+    address addr = jt->stack_reserved_zone_base();
     assert(addr != NULL, "initialization problem?");
     assert(jt->stack_available(addr) > 0, "stack guard should not be enabled");
 
@@ -1863,8 +1863,7 @@
     while (jt) {
       if (!jt->stack_guard_zone_unused() &&     // Stack not yet fully initialized
           jt->stack_guards_enabled()) {         // No pending stack overflow exceptions
-        if (!os::guard_memory((char *) jt->stack_red_zone_base() - jt->stack_red_zone_size(),
-                              jt->stack_yellow_zone_size() + jt->stack_red_zone_size())) {
+        if (!os::guard_memory((char *)jt->stack_end(), jt->stack_guard_zone_size())) {
           warning("Attempt to reguard stack yellow zone failed.");
         }
       }
@@ -4580,20 +4579,6 @@
   }
   // else it defaults to CLOCK_REALTIME
 
-  // If the pagesize of the VM is greater than 8K determine the appropriate
-  // number of initial guard pages.  The user can change this with the
-  // command line arguments, if needed.
-  if (vm_page_size() > (int)Linux::vm_default_page_size()) {
-    StackYellowPages = 1;
-    StackRedPages = 1;
-#if defined(IA32) || defined(IA64)
-    StackReservedPages = 1;
-#else
-    StackReservedPages = 0;
-#endif
-    StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size();
-  }
-
   // retrieve entry point for pthread_setname_np
   Linux::_pthread_setname_np =
     (int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np");
@@ -4652,7 +4637,8 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
-                                      (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
+                                      JavaThread::stack_guard_zone_size() +
+                                      JavaThread::stack_shadow_zone_size() +
                                       (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
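
A sketch of the minimum-stack computation above, with invented values; the
COMPILER2_PRESENT(+1) term adds one extra page of slack only when C2 is
compiled in:

    #include <algorithm>
    #include <cstddef>

    // Illustrative stand-ins for the real accessors and constants.
    size_t min_stack_allowed(size_t os_minimum,
                             size_t guard_zone_size,   // red+yellow+reserved
                             size_t shadow_zone_size,
                             size_t default_page_size) {
      const size_t bytes_per_word = 8;                 // 64-bit VM
      const bool   compiler2      = true;              // COMPILER2_PRESENT(+1)
      const size_t slack_pages    = 2 * bytes_per_word + (compiler2 ? 1 : 0);
      return std::max(os_minimum,
                      guard_zone_size + shadow_zone_size +
                      slack_pages * default_page_size);
    }
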
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -4359,15 +4359,6 @@
   // the minimum of what the OS supports (thr_min_stack()), and
   // enough to allow the thread to get to user bytecode execution.
   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
-  // If the pagesize of the VM is greater than 8K determine the appropriate
-  // number of initial guard pages.  The user can change this with the
-  // command line arguments, if needed.
-  if (vm_page_size() > 8*K) {
-    StackYellowPages = 1;
-    StackRedPages = 1;
-    StackReservedPages = 1;
-    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
-  }
 }
 
 // To install functions for atexit system call
@@ -4422,8 +4413,9 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
-                                        (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages+
-                                        2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
+                                        JavaThread::stack_guard_zone_size() +
+                                        JavaThread::stack_shadow_zone_size() +
+                                        (2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
@@ -4443,7 +4435,8 @@
   if (vm_page_size() > 8*K) {
     threadStackSizeInBytes = (threadStackSizeInBytes != 0)
        ? threadStackSizeInBytes +
-         ((StackYellowPages + StackRedPages) * vm_page_size())
+         JavaThread::stack_red_zone_size() +
+         JavaThread::stack_yellow_zone_size()
        : 0;
     ThreadStackSize = threadStackSizeInBytes/K;
   }
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -2551,8 +2551,8 @@
         } else if(thread->addr_inside_register_stack(addr)) {
           // Disable the yellow zone which sets the state that
           // we've got a stack overflow problem.
-          if (thread->stack_yellow_zone_enabled()) {
-            thread->disable_stack_yellow_zone();
+          if (thread->stack_yellow_reserved_zone_enabled()) {
+            thread->disable_stack_yellow_reserved_zone();
           }
           // Give us some room to process the exception.
           thread->disable_register_stack_guard();
@@ -2587,7 +2587,7 @@
           // Yellow zone violation.  The o/s has unprotected the first yellow
           // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
           // update the enabled status, even if the zone contains only one page.
-          thread->disable_stack_yellow_zone();
+          thread->disable_stack_yellow_reserved_zone();
           // If not in java code, return and hope for the best.
           return in_java
               ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
@@ -2616,7 +2616,7 @@
       if (in_java) {
         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
         address addr = (address) exceptionRecord->ExceptionInformation[1];
-        address stack_end = thread->stack_base() - thread->stack_size();
+        address stack_end = thread->stack_end();
         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
           // Stack overflow.
           assert(!os::uses_stack_guard_pages(),
@@ -2640,7 +2640,7 @@
           //
           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
           address addr = (address) exceptionRecord->ExceptionInformation[1];
-          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) {
+          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
             addr = (address)((uintptr_t)addr &
                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
             os::commit_memory((char *)addr, thread->stack_base() - addr,
@@ -4080,7 +4080,7 @@
 #endif // _WIN64
 #endif // PRODUCT
 
-// this is called _before_ the global arguments have been parsed
+// This is called _before_ the global arguments have been parsed
 void os::init(void) {
   _initial_pid = _getpid();
 
@@ -4185,8 +4185,9 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   size_t min_stack_allowed =
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
-                     2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
+            (size_t)(JavaThread::stack_yellow_zone_size() + JavaThread::stack_red_zone_size() +
+                     JavaThread::stack_shadow_zone_size() +
+                     (2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size());
   if (actual_reserve_size < min_stack_allowed) {
     tty->print_cr("\nThe stack size specified is too small, "
                   "Specify at least %dk",
--- a/hotspot/src/os/windows/vm/os_windows.inline.hpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os/windows/vm/os_windows.inline.hpp	Sun Dec 20 10:37:23 2015 -0500
@@ -26,6 +26,7 @@
 #define OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
 
 #include "runtime/os.hpp"
+#include "runtime/thread.hpp"
 
 inline const char* os::dll_file_extension()            { return ".dll"; }
 
@@ -72,7 +73,7 @@
   // the OS may not map an intervening page into our space
   // and may fault on a memory access to interior of our frame.
   address sp = current_stack_pointer();
-  for (int pages = 1; pages <= StackShadowPages; pages++) {
+  for (size_t pages = 1; pages <= (JavaThread::stack_shadow_zone_size() / os::vm_page_size()); pages++) {
     *((int *)(sp - (pages * vm_page_size()))) = 0;
   }
 }
--- a/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -238,8 +238,7 @@
   if (thread != NULL) {
 
     // Handle ALL stack overflow variations here
-    if (sig == SIGSEGV && (addr < thread->stack_base() &&
-                           addr >= thread->stack_base() - thread->stack_size())) {
+    if (sig == SIGSEGV && thread->on_local_stack(addr)) {
       // stack overflow
       //
       // If we are in a yellow zone and we are inside java, we disable the yellow zone and
@@ -247,8 +246,8 @@
       // If we are in native code or VM C code, we report-and-die. The original coding tried
       // to continue with yellow zone disabled, but that doesn't buy us much and prevents
       // hs_err_pid files.
-      if (thread->in_stack_yellow_zone(addr)) {
-        thread->disable_stack_yellow_zone();
+      if (thread->in_stack_yellow_reserved_zone(addr)) {
+        thread->disable_stack_yellow_reserved_zone();
         if (thread->thread_state() == _thread_in_Java) {
           // Throw a stack overflow exception.
           // Guard pages will be reenabled while unwinding the stack.
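
The handler logic above, reduced to its decision shape; the enum and the
boolean parameters are illustrative stand-ins, not HotSpot declarations:

    enum OverflowAction { THROW_STACK_OVERFLOW, REPORT_AND_DIE, NOT_HANDLED };

    // on_stack       ~ thread->on_local_stack(addr)
    // in_yellow_zone ~ thread->in_stack_yellow_reserved_zone(addr)
    // in_java        ~ thread->thread_state() == _thread_in_Java
    OverflowAction handle_sigsegv(bool on_stack, bool in_yellow_zone,
                                  bool in_java) {
      if (!on_stack || !in_yellow_zone) {
        return NOT_HANDLED;  // red-zone and non-stack faults handled elsewhere
      }
      // Disable the yellow zone first so the handler itself has stack to run
      // on; then throw in Java code, or die with an hs_err file in VM/native
      // code.
      return in_java ? THROW_STACK_OVERFLOW : REPORT_AND_DIE;
    }
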
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -518,10 +518,9 @@
       address addr = (address) info->si_addr;
 
       // check if fault address is within thread stack
-      if (addr < thread->stack_base() &&
-          addr >= thread->stack_base() - thread->stack_size()) {
+      if (thread->on_local_stack(addr)) {
         // stack overflow
-        if (thread->in_stack_yellow_zone(addr)) {
+        if (thread->in_stack_yellow_reserved_zone(addr)) {
           if (thread->thread_state() == _thread_in_Java) {
             if (thread->in_stack_reserved_zone(addr)) {
               frame fr;
@@ -542,11 +541,11 @@
             }
             // Throw a stack overflow exception.  Guard pages will be reenabled
             // while unwinding the stack.
-            thread->disable_stack_yellow_zone();
+            thread->disable_stack_yellow_reserved_zone();
             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
           } else {
             // Thread was in the vm or native code.  Return and try to finish.
-            thread->disable_stack_yellow_zone();
+            thread->disable_stack_yellow_reserved_zone();
             return 1;
           }
         } else if (thread->in_stack_red_zone(addr)) {
--- a/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -187,11 +187,10 @@
       address addr = (address) info->si_addr;
 
       // check if fault address is within thread stack
-      if (addr < thread->stack_base() &&
-          addr >= thread->stack_base() - thread->stack_size()) {
+      if (thread->on_local_stack(addr)) {
         // stack overflow
-        if (thread->in_stack_yellow_zone(addr)) {
-          thread->disable_stack_yellow_zone();
+        if (thread->in_stack_yellow_reserved_zone(addr)) {
+          thread->disable_stack_yellow_reserved_zone();
           ShouldNotCallThis();
         }
         else if (thread->in_stack_red_zone(addr)) {
--- a/hotspot/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -330,11 +330,10 @@
       address addr = (address) info->si_addr;
 
       // check if fault address is within thread stack
-      if (addr < thread->stack_base() &&
-          addr >= thread->stack_base() - thread->stack_size()) {
+      if (thread->on_local_stack(addr)) {
         // stack overflow
-        if (thread->in_stack_yellow_zone(addr)) {
-          thread->disable_stack_yellow_zone();
+        if (thread->in_stack_yellow_reserved_zone(addr)) {
+          thread->disable_stack_yellow_reserved_zone();
           if (thread->thread_state() == _thread_in_Java) {
             // Throw a stack overflow exception.  Guard pages will be reenabled
             // while unwinding the stack.
--- a/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -242,11 +242,10 @@
       address addr = ((NativeInstruction*)pc)->get_stack_bang_address(uc);
 
       // Check if fault address is within thread stack.
-      if (addr < thread->stack_base() &&
-          addr >= thread->stack_base() - thread->stack_size()) {
+      if (thread->on_local_stack(addr)) {
         // stack overflow
-        if (thread->in_stack_yellow_zone(addr)) {
-          thread->disable_stack_yellow_zone();
+        if (thread->in_stack_yellow_reserved_zone(addr)) {
+          thread->disable_stack_yellow_reserved_zone();
           if (thread->thread_state() == _thread_in_Java) {
             // Throw a stack overflow exception.
             // Guard pages will be reenabled while unwinding the stack.
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -380,11 +380,10 @@
                                  JavaThread* thread,
                                  address* stub) {
   // check if fault address is within thread stack
-  if (addr < thread->stack_base() &&
-      addr >= thread->stack_base() - thread->stack_size()) {
+  if (thread->on_local_stack(addr)) {
     // stack overflow
-    if (thread->in_stack_yellow_zone(addr)) {
-      thread->disable_stack_yellow_zone();
+    if (thread->in_stack_yellow_reserved_zone(addr)) {
+      thread->disable_stack_yellow_reserved_zone();
       if (thread->thread_state() == _thread_in_Java) {
         // Throw a stack overflow exception.  Guard pages will be reenabled
         // while unwinding the stack.
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -346,10 +346,9 @@
       address addr = (address) info->si_addr;
 
       // check if fault address is within thread stack
-      if (addr < thread->stack_base() &&
-          addr >= thread->stack_base() - thread->stack_size()) {
+      if (thread->on_local_stack(addr)) {
         // stack overflow
-        if (thread->in_stack_yellow_zone(addr)) {
+        if (thread->in_stack_yellow_reserved_zone(addr)) {
           if (thread->thread_state() == _thread_in_Java) {
             if (thread->in_stack_reserved_zone(addr)) {
               frame fr;
@@ -371,11 +370,11 @@
             }
             // Throw a stack overflow exception.  Guard pages will be reenabled
             // while unwinding the stack.
-            thread->disable_stack_yellow_zone();
+            thread->disable_stack_yellow_reserved_zone();
             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
           } else {
             // Thread was in the vm or native code.  Return and try to finish.
-            thread->disable_stack_yellow_zone();
+            thread->disable_stack_yellow_reserved_zone();
             return 1;
           }
         } else if (thread->in_stack_red_zone(addr)) {
@@ -931,10 +930,10 @@
    * If we are embedded in an app other than launcher (initial != main stack),
    * we don't have much control or understanding of the address space, just let it slide.
    */
-  char* hint = (char*) (Linux::initial_thread_stack_bottom() -
-                        ((StackReservedPages + StackYellowPages + StackRedPages + 1) * page_size));
+  char* hint = (char*)(Linux::initial_thread_stack_bottom() -
+                       (JavaThread::stack_guard_zone_size() + page_size));
   char* codebuf = os::attempt_reserve_memory_at(page_size, hint);
-  if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
+  if ((codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true))) {
     return; // No matter, we tried, best effort.
   }
 
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -178,11 +178,10 @@
       address addr = (address) info->si_addr;
 
       // check if fault address is within thread stack
-      if (addr < thread->stack_base() &&
-          addr >= thread->stack_base() - thread->stack_size()) {
+      if (thread->on_local_stack(addr)) {
         // stack overflow
-        if (thread->in_stack_yellow_zone(addr)) {
-          thread->disable_stack_yellow_zone();
+        if (thread->in_stack_yellow_reserved_zone(addr)) {
+          thread->disable_stack_yellow_reserved_zone();
           ShouldNotCallThis();
         }
         else if (thread->in_stack_red_zone(addr)) {
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -402,7 +402,7 @@
     // Handle ALL stack overflow variations here
     if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
       address addr = (address) info->si_addr;
-      if (thread->in_stack_yellow_zone(addr)) {
+      if (thread->in_stack_yellow_reserved_zone(addr)) {
         // Sometimes the register windows are not properly flushed.
         if(uc->uc_mcontext.gwins != NULL) {
           ::handle_unflushed_register_windows(uc->uc_mcontext.gwins);
@@ -424,11 +424,11 @@
           }
           // Throw a stack overflow exception.  Guard pages will be reenabled
           // while unwinding the stack.
-          thread->disable_stack_yellow_zone();
+          thread->disable_stack_yellow_reserved_zone();
           stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
         } else {
           // Thread was in the vm or native code.  Return and try to finish.
-          thread->disable_stack_yellow_zone();
+          thread->disable_stack_yellow_reserved_zone();
           return true;
         }
       } else if (thread->in_stack_red_zone(addr)) {
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -465,7 +465,7 @@
     // Handle ALL stack overflow variations here
     if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
       address addr = (address) info->si_addr;
-      if (thread->in_stack_yellow_zone(addr)) {
+      if (thread->in_stack_yellow_reserved_zone(addr)) {
         if (thread->thread_state() == _thread_in_Java) {
           if (thread->in_stack_reserved_zone(addr)) {
             frame fr;
@@ -486,11 +486,11 @@
           }
           // Throw a stack overflow exception.  Guard pages will be reenabled
           // while unwinding the stack.
-          thread->disable_stack_yellow_zone();
+          thread->disable_stack_yellow_reserved_zone();
           stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
         } else {
           // Thread was in the vm or native code.  Return and try to finish.
-          thread->disable_stack_yellow_zone();
+          thread->disable_stack_yellow_reserved_zone();
           return true;
         }
       } else if (thread->in_stack_red_zone(addr)) {
--- a/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -65,23 +65,19 @@
 
   // Something would really have to be screwed up to get a NULL pc
 
-  if (addr.pc() == NULL ) {
+  if (addr.pc() == NULL) {
     assert(false, "NULL pc from signal handler!");
     return false;
-
   }
 
   // If sp and fp are nonsense just leave them out
 
-  if ((address)ret_sp >= jt->stack_base() ||
-      (address)ret_sp < jt->stack_base() - jt->stack_size() ) {
-
-      ret_sp = NULL;
-      ret_fp = NULL;
+  if (!jt->on_local_stack((address)ret_sp)) {
+    ret_sp = NULL;
+    ret_fp = NULL;
   } else {
-
     // sp is reasonable, is fp reasonable?
-    if ( (address)ret_fp >= jt->stack_base() || ret_fp < ret_sp) {
+    if ((address)ret_fp >= jt->stack_base() || ret_fp < ret_sp) {
       ret_fp = NULL;
     }
   }
--- a/hotspot/src/share/vm/asm/assembler.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/share/vm/asm/assembler.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -23,12 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/codeBuffer.hpp"
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
-#include "asm/codeBuffer.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
+#include "runtime/thread.hpp"
 
 
 // Implementation of AbstractAssembler
@@ -132,7 +133,7 @@
     // is greater than a page.
 
     const int page_size = os::vm_page_size();
-    int bang_end = StackShadowPages * page_size;
+    int bang_end = (int)JavaThread::stack_shadow_zone_size();
 
     // This is how far the previous frame's stack banging extended.
     const int bang_end_safe = bang_end;
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -532,9 +532,10 @@
   // an interpreter frame with greater than a page of locals, so each page
   // needs to be checked.  Only true for non-native.
   if (UseStackBanging) {
-    const int start_page = native_call ? StackShadowPages : 1;
     const int page_size = os::vm_page_size();
-    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
+    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
+    const int start_page = native_call ? n_shadow_pages : 1;
+    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
       __ bang_stack_with_offset(pages*page_size);
     }
   }
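
A sketch of the loop above, with a hypothetical function standing in for the
masm call; for native calls only the deepest shadow page is banged, since
native frames have a fixed size, while interpreted frames can exceed a page of
locals and therefore need every page touched:

    #include <cstdio>

    const int kPageSize = 4096;

    // Hypothetical stand-in for __ bang_stack_with_offset(offset).
    static void bang_stack_with_offset(int offset) {
      std::printf("bang at sp - %d\n", offset);
    }

    void bang_shadow_region(int n_shadow_pages, bool native_call) {
      const int start_page = native_call ? n_shadow_pages : 1;
      for (int pages = start_page; pages <= n_shadow_pages; pages++) {
        bang_stack_with_offset(pages * kPageSize);
      }
    }
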
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -1201,7 +1201,7 @@
 
 WB_ENTRY(jlong, WB_GetThreadRemainingStackSize(JNIEnv* env, jobject o))
   JavaThread* t = JavaThread::current();
-  return (jlong) t->stack_available(os::current_stack_pointer()) - (jlong) StackShadowPages * os::vm_page_size();
+  return (jlong) t->stack_available(os::current_stack_pointer()) - (jlong)JavaThread::stack_shadow_zone_size();
 WB_END
 
 
--- a/hotspot/src/share/vm/runtime/globals.hpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Sun Dec 20 10:37:23 2015 -0500
@@ -3428,15 +3428,18 @@
                                                                             \
   /* stack parameters */                                                    \
   product_pd(intx, StackYellowPages,                                        \
-          "Number of yellow zone (recoverable overflows) pages")            \
+          "Number of yellow zone (recoverable overflows) pages of size "    \
+          "4KB. If pages are bigger yellow zone is aligned up.")            \
           range(MIN_STACK_YELLOW_PAGES, (DEFAULT_STACK_YELLOW_PAGES+5))     \
                                                                             \
   product_pd(intx, StackRedPages,                                           \
-          "Number of red zone (unrecoverable overflows) pages")             \
+          "Number of red zone (unrecoverable overflows) pages of size "     \
+          "4KB. If pages are bigger red zone is aligned up.")               \
           range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2))           \
                                                                             \
   product_pd(intx, StackReservedPages,                                      \
-          "Number of reserved zone (reserved to annotated methods) pages")  \
+          "Number of reserved zone (reserved to annotated methods) pages"   \
+          " of size 4KB. If pages are bigger reserved zone is aligned up.") \
           range(MIN_STACK_RESERVED_PAGES, (DEFAULT_STACK_RESERVED_PAGES+10))\
                                                                             \
   product(bool, RestrictReservedStack, true,                                \
@@ -3444,13 +3447,14 @@
                                                                             \
   /* greater stack shadow pages can't generate instruction to bang stack */ \
   product_pd(intx, StackShadowPages,                                        \
-          "Number of shadow zone (for overflow checking) pages "            \
-          "this should exceed the depth of the VM and native call stack")   \
+          "Number of shadow zone (for overflow checking) pages of size "    \
+          "4KB. If pages are bigger shadow zone is aligned up. "            \
+          "This should exceed the depth of the VM and native call stack.")  \
           range(MIN_STACK_SHADOW_PAGES, (DEFAULT_STACK_SHADOW_PAGES+30))    \
                                                                             \
   product_pd(intx, ThreadStackSize,                                         \
           "Thread Stack Size (in Kbytes)")                                  \
-          range(0, max_intx-os::vm_page_size())                             \
+          range(0, (max_intx-os::vm_page_size())/(1 * K))                   \
                                                                             \
   product_pd(intx, VMThreadStackSize,                                       \
           "Non-Java Thread Stack Size (in Kbytes)")                         \
@@ -3458,7 +3462,7 @@
                                                                             \
   product_pd(intx, CompilerThreadStackSize,                                 \
           "Compiler Thread Stack Size (in Kbytes)")                         \
-          range(0, max_intx /(1 * K))                                       \
+          range(0, max_intx/(1 * K))                                        \
                                                                             \
   develop_pd(size_t, JVMInvokeMethodSlack,                                  \
           "Stack space (bytes) required for JVM_InvokeMethod to complete")  \
--- a/hotspot/src/share/vm/runtime/os.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/share/vm/runtime/os.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -316,8 +316,16 @@
   // decisions depending on large page support and the calculated large page size.
   large_page_init();
 
+  // We need to adapt the configured number of stack protection pages given
+  // in 4K pages to the actual os page size. We must do this before setting
+  // up minimal stack sizes etc. in os::init_2().
+  JavaThread::set_stack_red_zone_size     (align_size_up(StackRedPages      * 4 * K, vm_page_size()));
+  JavaThread::set_stack_yellow_zone_size  (align_size_up(StackYellowPages   * 4 * K, vm_page_size()));
+  JavaThread::set_stack_reserved_zone_size(align_size_up(StackReservedPages * 4 * K, vm_page_size()));
+  JavaThread::set_stack_shadow_zone_size  (align_size_up(StackShadowPages   * 4 * K, vm_page_size()));
+
   // VM version initialization identifies some characteristics of the
-  // the platform that are used during ergonomic decisions.
+  // platform that are used during ergonomic decisions.
   VM_Version::init_before_ergo();
 }
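
The conversion above turns the 4K-unit flags into real zone sizes by rounding
up to the OS page size. A sketch, assuming the usual power-of-two definition
of align_size_up:

    #include <cassert>
    #include <cstddef>

    // Assumed semantics of align_size_up for a power-of-two alignment.
    size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      // E.g. StackYellowPages = 2 (configured in 4K units) on a 64K-page
      // system yields one real page; on a 4K-page system it stays two pages.
      assert(align_up(2 * 4096, 64 * 1024) == 64 * 1024);
      assert(align_up(2 * 4096, 4096)      == 2 * 4096);
      return 0;
    }
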
 
@@ -1015,8 +1023,7 @@
     }
     // If the addr is in the stack region for this thread then report that
     // and print thread info
-    if (thread->stack_base() >= addr &&
-        addr > (thread->stack_base() - thread->stack_size())) {
+    if (thread->on_local_stack(addr)) {
       st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: "
                    INTPTR_FORMAT, p2i(addr), p2i(thread));
       if (verbose) thread->print_on(st);
@@ -1375,9 +1382,8 @@
 
 // Returns true if the current stack pointer is above the stack shadow
 // pages, false otherwise.
-
 bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method) {
-  assert(StackRedPages > 0 && StackYellowPages > 0,"Sanity check");
+  if (!thread->is_Java_thread()) return false;
   address sp = current_stack_pointer();
   // Check if we have StackShadowPages above the yellow zone.  This parameter
   // is dependent on the depth of the maximum VM call stack possible from
@@ -1386,12 +1392,13 @@
   // respectively.
   const int framesize_in_bytes =
     Interpreter::size_top_interpreter_activation(method()) * wordSize;
-  int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages
-                      + StackReservedPages) * vm_page_size())
-                      + framesize_in_bytes;
-  // The very lower end of the stack
-  address stack_limit = thread->stack_base() - thread->stack_size();
-  return (sp > (stack_limit + reserved_area));
+
+  assert((thread->stack_base() - thread->stack_size()) +
+         (JavaThread::stack_guard_zone_size() +
+          JavaThread::stack_shadow_zone_size() + framesize_in_bytes) ==
+         ((JavaThread*)thread)->stack_overflow_limit() + framesize_in_bytes, "sanity");
+
+  return (sp > ((JavaThread*)thread)->stack_overflow_limit() + framesize_in_bytes);
 }
 
 size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
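
The rewritten return relies on stack_overflow_limit() being stack_end() plus
the guard and shadow zones, which the added assert cross-checks. A standalone
version of that check with invented numbers:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      uintptr_t base      = 0x40000000;  // stack grows downward from base
      size_t    size      = 1024 * 1024;
      size_t    guard     = 16 * 1024;   // red + yellow + reserved
      size_t    shadow    = 24 * 1024;
      size_t    framesize = 2048;        // top interpreter activation

      uintptr_t stack_end      = base - size;
      uintptr_t overflow_limit = stack_end + guard + shadow;

      // The identity asserted in stack_shadow_pages_available():
      assert(stack_end + (guard + shadow + framesize) ==
             overflow_limit + framesize);

      // Shadow pages are available iff sp stays above limit + frame size.
      uintptr_t sp = base - 64 * 1024;
      assert(sp > overflow_limit + framesize);
      return 0;
    }
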
--- a/hotspot/src/share/vm/runtime/thread.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -319,8 +319,7 @@
 
 #if INCLUDE_NMT
   // record thread's native stack, stack grows downward
-  address stack_low_addr = stack_base() - stack_size();
-  MemTracker::record_thread_stack(stack_low_addr, stack_size());
+  MemTracker::record_thread_stack(stack_end(), stack_size());
 #endif // INCLUDE_NMT
 }
 
@@ -337,8 +336,7 @@
   // not proper way to enforce that.
 #if INCLUDE_NMT
   if (_stack_base != NULL) {
-    address low_stack_addr = stack_base() - stack_size();
-    MemTracker::release_thread_stack(low_stack_addr, stack_size());
+    MemTracker::release_thread_stack(stack_end(), stack_size());
 #ifdef ASSERT
     set_stack_base(NULL);
 #endif
@@ -821,7 +819,7 @@
   else                                st->print("Thread");
 
   st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]",
-            p2i(_stack_base - _stack_size), p2i(_stack_base));
+            p2i(stack_end()), p2i(stack_base()));
 
   if (osthread()) {
     st->print(" [id=%d]", osthread()->thread_id());
@@ -907,9 +905,8 @@
   return false;
 }
 
-
 bool Thread::is_in_usable_stack(address adr) const {
-  size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
+  size_t stack_guard_size = os::uses_stack_guard_pages() ? JavaThread::stack_guard_zone_size() : 0;
   size_t usable_stack_size = _stack_size - stack_guard_size;
 
   return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size));
@@ -1534,7 +1531,7 @@
 }
 
 bool JavaThread::reguard_stack(address cur_sp) {
-  if (_stack_guard_state != stack_guard_yellow_disabled
+  if (_stack_guard_state != stack_guard_yellow_reserved_disabled
       && _stack_guard_state != stack_guard_reserved_disabled) {
     return true; // Stack already guarded or guard pages not needed.
   }
@@ -1551,9 +1548,10 @@
   // is executing there, either StackShadowPages should be larger, or
   // some exception code in c1, c2 or the interpreter isn't unwinding
   // when it should.
-  guarantee(cur_sp > stack_yellow_zone_base(), "not enough space to reguard - increase StackShadowPages");
-  if (_stack_guard_state == stack_guard_yellow_disabled) {
-    enable_stack_yellow_zone();
+  guarantee(cur_sp > stack_reserved_zone_base(),
+            "not enough space to reguard - increase StackShadowPages");
+  if (_stack_guard_state == stack_guard_yellow_reserved_disabled) {
+    enable_stack_yellow_reserved_zone();
     if (reserved_stack_activation() != stack_base()) {
       set_reserved_stack_activation(stack_base());
     }
@@ -2480,10 +2478,15 @@
   }
 }
 
+size_t JavaThread::_stack_red_zone_size = 0;
+size_t JavaThread::_stack_yellow_zone_size = 0;
+size_t JavaThread::_stack_reserved_zone_size = 0;
+size_t JavaThread::_stack_shadow_zone_size = 0;
+
 void JavaThread::create_stack_guard_pages() {
-  if (! os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) return;
-  address low_addr = stack_base() - stack_size();
-  size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
+  if (!os::uses_stack_guard_pages() || _stack_guard_state != stack_guard_unused) { return; }
+  address low_addr = stack_end();
+  size_t len = stack_guard_zone_size();
 
   int allocate = os::allocate_stack_guard_pages();
   // warning("Guarding at " PTR_FORMAT " for len " SIZE_FORMAT "\n", low_addr, len);
@@ -2506,8 +2509,8 @@
 void JavaThread::remove_stack_guard_pages() {
   assert(Thread::current() == this, "from different thread");
   if (_stack_guard_state == stack_guard_unused) return;
-  address low_addr = stack_base() - stack_size();
-  size_t len = (StackReservedPages + StackYellowPages + StackRedPages) * os::vm_page_size();
+  address low_addr = stack_end();
+  size_t len = stack_guard_zone_size();
 
   if (os::allocate_stack_guard_pages()) {
     if (os::remove_stack_guard_pages((char *) low_addr, len)) {
@@ -2563,18 +2566,18 @@
   disable_register_stack_guard();
 }
 
-void JavaThread::enable_stack_yellow_zone() {
+void JavaThread::enable_stack_yellow_reserved_zone() {
   assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
   assert(_stack_guard_state != stack_guard_enabled, "already enabled");
 
   // The base notation is from the stack's point of view, growing downward.
   // We need to adjust it to work correctly with guard_memory()
-  address base = stack_yellow_zone_base() - stack_yellow_zone_size();
+  address base = stack_red_zone_base();
 
   guarantee(base < stack_base(), "Error calculating stack yellow zone");
   guarantee(base < os::current_stack_pointer(), "Error calculating stack yellow zone");
 
-  if (os::guard_memory((char *) base, stack_yellow_zone_size())) {
+  if (os::guard_memory((char *) base, stack_yellow_reserved_zone_size())) {
     _stack_guard_state = stack_guard_enabled;
   } else {
     warning("Attempt to guard stack yellow zone failed.");
@@ -2582,19 +2585,19 @@
   enable_register_stack_guard();
 }
 
-void JavaThread::disable_stack_yellow_zone() {
+void JavaThread::disable_stack_yellow_reserved_zone() {
   assert(_stack_guard_state != stack_guard_unused, "must be using guard pages.");
-  assert(_stack_guard_state != stack_guard_yellow_disabled, "already disabled");
+  assert(_stack_guard_state != stack_guard_yellow_reserved_disabled, "already disabled");
 
   // Simply return if called for a thread that does not use guard pages.
   if (_stack_guard_state == stack_guard_unused) return;
 
   // The base notation is from the stack's point of view, growing downward.
   // We need to adjust it to work correctly with guard_memory()
-  address base = stack_yellow_zone_base() - stack_yellow_zone_size();
-
-  if (os::unguard_memory((char *)base, stack_yellow_zone_size())) {
-    _stack_guard_state = stack_guard_yellow_disabled;
+  address base = stack_red_zone_base();
+
+  if (os::unguard_memory((char *)base, stack_yellow_reserved_zone_size())) {
+    _stack_guard_state = stack_guard_yellow_reserved_disabled;
   } else {
     warning("Attempt to unguard stack yellow zone failed.");
   }
@@ -2899,7 +2902,7 @@
     st->print(", id=%d", osthread()->thread_id());
   }
   st->print(", stack(" PTR_FORMAT "," PTR_FORMAT ")",
-            p2i(_stack_base - _stack_size), p2i(_stack_base));
+            p2i(stack_end()), p2i(stack_base()));
   st->print("]");
   return;
 }
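
After this change the yellow and reserved zones are guarded and unguarded as
one contiguous region starting at the red zone's upper end. A sketch with
dummy stand-ins for os::guard_memory()/os::unguard_memory():

    #include <cstddef>
    #include <cstdint>

    // Dummy stand-ins for os::guard_memory()/os::unguard_memory()
    // (e.g. mprotect with PROT_NONE vs. PROT_READ|PROT_WRITE).
    static bool protect_region(void* p, size_t len)   { (void)p; (void)len; return true; }
    static bool unprotect_region(void* p, size_t len) { (void)p; (void)len; return true; }

    struct StackGuards {
      uintptr_t stack_end;               // lowest stack address
      size_t    red, yellow, reserved;   // zone sizes, low to high

      uintptr_t red_zone_base() const        { return stack_end + red; }
      size_t    yellow_reserved_size() const { return yellow + reserved; }

      // ~ JavaThread::enable_stack_yellow_reserved_zone()
      bool enable() {
        return protect_region((void*)red_zone_base(), yellow_reserved_size());
      }
      // ~ JavaThread::disable_stack_yellow_reserved_zone()
      bool disable() {
        return unprotect_region((void*)red_zone_base(), yellow_reserved_size());
      }
    };
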
--- a/hotspot/src/share/vm/runtime/thread.hpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Sun Dec 20 10:37:23 2015 -0500
@@ -549,15 +549,15 @@
  public:
   // Stack overflow support
   address stack_base() const           { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
-
   void    set_stack_base(address base) { _stack_base = base; }
   size_t  stack_size() const           { return _stack_size; }
   void    set_stack_size(size_t size)  { _stack_size = size; }
+  address stack_end()  const           { return stack_base() - stack_size(); }
   void    record_stack_base_and_size();
 
   bool    on_local_stack(address adr) const {
     // QQQ this has knowledge of direction, ought to be a stack method
-    return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
+    return (_stack_base >= adr && adr >= stack_end());
   }
 
   uintptr_t self_raw_id()                    { return _self_raw_id; }
@@ -910,7 +910,7 @@
   enum StackGuardState {
     stack_guard_unused,         // not needed
     stack_guard_reserved_disabled,
-    stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow
+    stack_guard_yellow_reserved_disabled, // disabled (temporarily) after stack overflow
     stack_guard_enabled         // enabled
   };
 
@@ -1344,32 +1344,138 @@
   }
 
   // Stack overflow support
+  //
+  //  (small addresses)
+  //
+  //  --  <-- stack_end()                   ---
+  //  |                                      |
+  //  |  red pages                           |
+  //  |                                      |
+  //  --  <-- stack_red_zone_base()          |
+  //  |                                      |
+  //  |                                     guard
+  //  |  yellow pages                       zone
+  //  |                                      |
+  //  |                                      |
+  //  --  <-- stack_yellow_zone_base()       |
+  //  |                                      |
+  //  |                                      |
+  //  |  reserved pages                      |
+  //  |                                      |
+  //  --  <-- stack_reserved_zone_base()    ---      ---
+  //                                                 /|\  shadow
+  //                                                  |   zone
+  //                                                 \|/  size
+  //  some untouched memory                          ---         <--  stack_overflow_limit()
+  //
+  //
+  //  --
+  //  |
+  //  |  shadow zone
+  //  |
+  //  --
+  //  x    frame n
+  //  --
+  //  x    frame n-1
+  //  x
+  //  --
+  //  ...
+  //
+  //  --
+  //  x    frame 0
+  //  --  <-- stack_base()
+  //
+  //  (large addresses)
+  //
+
+ private:
+  // These values are derived from flags StackRedPages, StackYellowPages,
+  // StackReservedPages and StackShadowPages. The zone size is determined
+  // ergonomically if page_size > 4K.
+  static size_t _stack_red_zone_size;
+  static size_t _stack_yellow_zone_size;
+  static size_t _stack_reserved_zone_size;
+  static size_t _stack_shadow_zone_size;
+ public:
   inline size_t stack_available(address cur_sp);
-  address stack_reserved_zone_base() {
-    return stack_yellow_zone_base(); }
-  size_t stack_reserved_zone_size() {
-    return StackReservedPages * os::vm_page_size(); }
-  address stack_yellow_zone_base() {
-    return (address)(stack_base() -
-                     (stack_size() -
-                     (stack_red_zone_size() + stack_yellow_zone_size())));
+
+  static size_t stack_red_zone_size() {
+    assert(_stack_red_zone_size > 0, "Don't call this before the field is initialized.");
+    return _stack_red_zone_size;
   }
-  size_t  stack_yellow_zone_size() {
-    return StackYellowPages * os::vm_page_size() + stack_reserved_zone_size();
+  static void set_stack_red_zone_size(size_t s) {
+    assert(is_size_aligned(s, os::vm_page_size()),
+           "We can not protect if the red zone size is not page aligned.");
+    assert(_stack_red_zone_size == 0, "This should be called only once.");
+    _stack_red_zone_size = s;
   }
   address stack_red_zone_base() {
-    return (address)(stack_base() - (stack_size() - stack_red_zone_size()));
-  }
-  size_t stack_red_zone_size() { return StackRedPages * os::vm_page_size(); }
-  bool in_stack_reserved_zone(address a) {
-    return (a <= stack_reserved_zone_base()) && (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
-  }
-  bool in_stack_yellow_zone(address a) {
-    return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base());
+    return (address)(stack_end() + stack_red_zone_size());
   }
   bool in_stack_red_zone(address a) {
-    return (a <= stack_red_zone_base()) &&
-           (a >= (address)((intptr_t)stack_base() - stack_size()));
+    return a <= stack_red_zone_base() && a >= stack_end();
+  }
+
+  static size_t stack_yellow_zone_size() {
+    assert(_stack_yellow_zone_size > 0, "Don't call this before the field is initialized.");
+    return _stack_yellow_zone_size;
+  }
+  static void set_stack_yellow_zone_size(size_t s) {
+    assert(is_size_aligned(s, os::vm_page_size()),
+           "We can not protect if the yellow zone size is not page aligned.");
+    assert(_stack_yellow_zone_size == 0, "This should be called only once.");
+    _stack_yellow_zone_size = s;
+  }
+
+  static size_t stack_reserved_zone_size() {
+    // _stack_reserved_zone_size may be 0. This indicates the feature is off.
+    return _stack_reserved_zone_size;
+  }
+  static void set_stack_reserved_zone_size(size_t s) {
+    assert(is_size_aligned(s, os::vm_page_size()),
+           "We can not protect if the reserved zone size is not page aligned.");
+    assert(_stack_reserved_zone_size == 0, "This should be called only once.");
+    _stack_reserved_zone_size = s;
+  }
+  address stack_reserved_zone_base() {
+    return (address)(stack_end() +
+                     (stack_red_zone_size() + stack_yellow_zone_size() + stack_reserved_zone_size()));
+  }
+  bool in_stack_reserved_zone(address a) {
+    return (a <= stack_reserved_zone_base()) &&
+           (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
+  }
+
+  static size_t stack_yellow_reserved_zone_size() {
+    return _stack_yellow_zone_size + _stack_reserved_zone_size;
+  }
+  bool in_stack_yellow_reserved_zone(address a) {
+    return (a <= stack_reserved_zone_base()) && (a >= stack_red_zone_base());
+  }
+
+  // Size of red + yellow + reserved zones.
+  static size_t stack_guard_zone_size() {
+    return stack_red_zone_size() + stack_yellow_reserved_zone_size();
+  }
+
+  static size_t stack_shadow_zone_size() {
+    assert(_stack_shadow_zone_size > 0, "Don't call this before the field is initialized.");
+    return _stack_shadow_zone_size;
+  }
+  static void set_stack_shadow_zone_size(size_t s) {
+    // The shadow area is not allocated or protected, so
+    // it need not be page aligned.
+    // But the stack bang currently assumes that it is a
+    // multiple of the page size; this guarantees that the bang
+    // loop touches all pages in the shadow zone.
+    // The same property could be guaranteed differently: e.g., if
+    // the page size is a multiple of 4K, banging in 4K steps
+    // suffices to touch all pages. (Some pages would be banged
+    // more than once, though.)
+    assert(is_size_aligned(s, os::vm_page_size()),
+           "Stack bang assumes multiple of page size.");
+    assert(_stack_shadow_zone_size == 0, "This should be called only once.");
+    _stack_shadow_zone_size = s;
   }
 
   void create_stack_guard_pages();
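
The diagram and accessors in the hunk above fix every zone base relative to stack_end(). A compilable standalone model of that layout, with illustrative stand-ins for the JavaThread accessors and sizes assumed page aligned, exactly as the setters assert:

    #include <cstddef>

    // The stack grows downward, so stack_end is the smallest address and
    // each *_base() is the upper boundary of its zone.
    struct StackZones {
      char*       stack_end;              // lowest address of the stack
      std::size_t red, yellow, reserved;  // page-aligned zone sizes

      char* red_zone_base()      const { return stack_end + red; }
      char* reserved_zone_base() const {
        return stack_end + red + yellow + reserved;
      }
      std::size_t guard_zone_size() const { return red + yellow + reserved; }

      bool in_red_zone(const char* a) const {
        return a <= red_zone_base() && a >= stack_end;
      }
      bool in_yellow_reserved_zone(const char* a) const {
        return a <= reserved_zone_base() && a >= red_zone_base();
      }
    };
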
@@ -1377,18 +1483,18 @@
 
   void enable_stack_reserved_zone();
   void disable_stack_reserved_zone();
-  void enable_stack_yellow_zone();
-  void disable_stack_yellow_zone();
+  void enable_stack_yellow_reserved_zone();
+  void disable_stack_yellow_reserved_zone();
   void enable_stack_red_zone();
   void disable_stack_red_zone();
 
   inline bool stack_guard_zone_unused();
-  inline bool stack_yellow_zone_disabled();
+  inline bool stack_yellow_reserved_zone_disabled();
   inline bool stack_reserved_zone_disabled();
   inline bool stack_guards_enabled();
 
   address reserved_stack_activation() const { return _reserved_stack_activation; }
-  void      set_reserved_stack_activation(address addr) {
+  void set_reserved_stack_activation(address addr) {
     assert(_reserved_stack_activation == stack_base()
             || _reserved_stack_activation == NULL
             || addr == stack_base(), "Must not be set twice");
@@ -1408,11 +1514,9 @@
 
   address stack_overflow_limit() { return _stack_overflow_limit; }
   void set_stack_overflow_limit() {
-    _stack_overflow_limit = _stack_base - _stack_size +
-                            ((StackShadowPages +
-                              StackReservedPages +
-                              StackYellowPages +
-                              StackRedPages) * os::vm_page_size());
+    _stack_overflow_limit = stack_end() +
+                            (JavaThread::stack_guard_zone_size() +
+                             JavaThread::stack_shadow_zone_size());
   }
 
   // Misc. accessors/mutators
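
With the zones factored out, set_stack_overflow_limit() reduces to a single addition; a sketch under the same stand-in naming as above:

    #include <cstddef>

    // Everything below this limit is guard pages plus shadow headroom;
    // the interpreter's stack bang compares SP against it.
    char* overflow_limit(char* stack_end,
                         std::size_t guard_zone_size,
                         std::size_t shadow_zone_size) {
      return stack_end + guard_zone_size + shadow_zone_size;
    }
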
--- a/hotspot/src/share/vm/runtime/thread.inline.hpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/share/vm/runtime/thread.inline.hpp	Sun Dec 20 10:37:23 2015 -0500
@@ -126,8 +126,8 @@
   return _stack_guard_state == stack_guard_unused;
 }
 
-inline bool JavaThread::stack_yellow_zone_disabled() {
-  return _stack_guard_state == stack_guard_yellow_disabled;
+inline bool JavaThread::stack_yellow_reserved_zone_disabled() {
+  return _stack_guard_state == stack_guard_yellow_reserved_disabled;
 }
 
 inline bool JavaThread::stack_reserved_zone_disabled() {
@@ -138,9 +138,9 @@
   // This code assumes java stacks grow down
   address low_addr; // Limit on the address for deepest stack depth
   if (_stack_guard_state == stack_guard_unused) {
-    low_addr =  stack_base() - stack_size();
+    low_addr = stack_end();
   } else {
-    low_addr = stack_yellow_zone_base();
+    low_addr = stack_reserved_zone_base();
   }
   return cur_sp > low_addr ? cur_sp - low_addr : 0;
 }
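
stack_available() measures headroom down to stack_end() when guards are unused and down to the reserved zone's upper boundary otherwise; the tail of the computation is the usual clamped subtraction (sketch, stand-in names):

    #include <cstddef>

    std::size_t available(const char* cur_sp, const char* low_addr) {
      // Deeper than low_addr means no usable headroom is left.
      return cur_sp > low_addr ? static_cast<std::size_t>(cur_sp - low_addr)
                               : 0;
    }
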
--- a/hotspot/src/share/vm/shark/sharkStack.cpp	Fri Dec 18 13:38:49 2015 +0000
+++ b/hotspot/src/share/vm/shark/sharkStack.cpp	Sun Dec 20 10:37:23 2015 -0500
@@ -133,7 +133,7 @@
   builder()->CreateCondBr(
     builder()->CreateICmpULT(
       free_stack,
-      LLVMValue::intptr_constant(StackShadowPages * os::vm_page_size())),
+      LLVMValue::intptr_constant(JavaThread::stack_shadow_zone_size())),
     overflow, abi_ok);
 
   // Handle overflows
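
The Shark check now compares the free stack against the shadow zone in bytes rather than a page count. For completeness, a hedged sketch of the bang loop that the page-alignment assert on set_stack_shadow_zone_size() exists to keep correct; writing below SP is only meaningful inside the VM's own stack-banging context, so this is an illustration, not portable code:

    #include <cstddef>

    void bang_shadow_pages(volatile char* sp,
                           std::size_t shadow_zone_size,
                           std::size_t page_size) {
      // Because shadow_zone_size is a multiple of page_size, stepping one
      // page at a time touches every page of the shadow zone.
      for (std::size_t off = page_size; off <= shadow_zone_size;
           off += page_size) {
        sp[-static_cast<std::ptrdiff_t>(off)] = 0;  // touch one byte per page
      }
    }
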