Merge
author amurillo
Fri, 10 Jun 2016 15:13:37 -0700
changeset 38944 f91311e54b41
parent 38921 d53037a90c44 (current diff)
parent 38943 2e5c855d6b1e (diff)
child 38945 6f7d687193a4
child 39269 fbef515ea566
Merge
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp	Fri Jun 10 15:13:37 2016 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1530,6 +1530,10 @@
   inline void ld(   Register d, int si16,    Register s1);
   inline void ldu(  Register d, int si16,    Register s1);
 
+  // For convenience. Load pointer into d from b+s1.
+  inline void ld_ptr(Register d, int b, Register s1);
+  DEBUG_ONLY(inline void ld_ptr(Register d, ByteSize b, Register s1);)
+
   //  PPC 1, section 3.3.3 Fixed-Point Store Instructions
   inline void stwx( Register d, Register s1, Register s2);
   inline void stw(  Register d, int si16,    Register s1);
@@ -2194,7 +2198,8 @@
   void add( Register d, RegisterOrConstant roc, Register s1);
   void subf(Register d, RegisterOrConstant roc, Register s1);
   void cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1);
-
+  // Load pointer d from s1+roc.
+  void ld_ptr(Register d, RegisterOrConstant roc, Register s1 = noreg) { ld(d, roc, s1); }
 
   // Emit several instructions to load a 64 bit constant. This issues a fixed
   // instruction pattern so that the constant can be patched later on.
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Fri Jun 10 15:13:37 2016 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -328,6 +328,9 @@
 inline void Assembler::ldx(  Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::ldu(  Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
 
+inline void Assembler::ld_ptr(Register d, int b, Register s1) { ld(d, b, s1); }
+DEBUG_ONLY(inline void Assembler::ld_ptr(Register d, ByteSize b, Register s1) { ld(d, in_bytes(b), s1); })
+
 //  PPC 1, section 3.3.3 Fixed-Point Store Instructions
 inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::stw(  Register d, int si16,    Register s1) { emit_int32(STW_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
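
Two call sites added later in this patch (in interp_masm_ppc_64.cpp) show both forms of the new ld_ptr helper in use. The ByteSize overload is DEBUG_ONLY because only debug builds define ByteSize as a distinct class requiring in_bytes(); in product builds ByteSize is a plain int typedef, so the int overload already covers it:

    ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread); // ByteSize offset
    ld_ptr(R11_scratch1, _abi(callers_sp), R1_SP);                          // plain int offset
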
--- a/hotspot/src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1242,7 +1242,7 @@
 
 
 void LIR_Assembler::return_op(LIR_Opr result) {
-  const Register return_pc        = R11;
+  const Register return_pc        = R31;  // Must survive C-call to enable_stack_reserved_zone().
   const Register polling_page     = R12;
 
   // Pop the stack before the safepoint code.
@@ -1265,6 +1265,10 @@
   // Move return pc to LR.
   __ mtlr(return_pc);
 
+  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
+    __ reserved_stack_check(return_pc);
+  }
+
   // We need to mark the code position where the load from the safepoint
   // polling page was emitted as relocInfo::poll_return_type here.
   __ relocate(relocInfo::poll_return_type);
--- a/hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Fri Jun 10 15:13:37 2016 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,4 +52,6 @@
 #define INCLUDE_RTM_OPT 1
 #endif
 
+#define SUPPORT_RESERVED_STACK_AREA
+
 #endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
--- a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Fri Jun 10 15:13:37 2016 -0700
@@ -43,7 +43,7 @@
 #define DEFAULT_STACK_YELLOW_PAGES (6)
 #define DEFAULT_STACK_RED_PAGES (1)
 #define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
-#define DEFAULT_STACK_RESERVED_PAGES (0)
+#define DEFAULT_STACK_RESERVED_PAGES (1)
 
 #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -480,6 +480,7 @@
 
 void InterpreterMacroAssembler::generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1) {
   Label done;
+  BLOCK_COMMENT("stack_overflow_check_with_compare_and_throw {");
   sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
   ld(Rscratch1, thread_(stack_overflow_limit));
   cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
@@ -501,6 +502,7 @@
 
   align(32, 12);
   bind(done);
+  BLOCK_COMMENT("} stack_overflow_check_with_compare_and_throw");
 }
 
 // Separate these two to allow for delay slot in middle.
@@ -805,16 +807,41 @@
 void InterpreterMacroAssembler::remove_activation(TosState state,
                                                   bool throw_monitor_exception,
                                                   bool install_monitor_exception) {
+  BLOCK_COMMENT("remove_activation {");
   unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
 
   // Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
   notify_method_exit(false, state, NotifyJVMTI, true);
 
+  BLOCK_COMMENT("reserved_stack_check:");
+  if (StackReservedPages > 0) {
+    // Test if reserved zone needs to be enabled.
+    Label no_reserved_zone_enabling;
+
+    // Compare frame pointers. There is no good stack pointer, as with stack
+    // frame compression we can get different SPs when we do calls. A subsequent
+    // call could have a smaller SP, so that this compare succeeds for an
+    // inner call of the method annotated with ReservedStack.
+    ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
+    ld_ptr(R11_scratch1, _abi(callers_sp), R1_SP); // Load frame pointer.
+    cmpld(CCR0, R11_scratch1, R0);
+    blt_predict_taken(CCR0, no_reserved_zone_enabling);
+
+    // Enable reserved zone again, throw stack overflow exception.
+    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError));
+
+    should_not_reach_here();
+
+    bind(no_reserved_zone_enabling);
+  }
+
   verify_oop(R17_tos, state);
   verify_thread();
 
   merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
   mtlr(R0);
+  BLOCK_COMMENT("} remove_activation");
 }
 
 // Lock object
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -1400,6 +1400,28 @@
 #endif
 }
 
+void MacroAssembler::reserved_stack_check(Register return_pc) {
+  // Test if reserved zone needs to be enabled.
+  Label no_reserved_zone_enabling;
+
+  ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
+  cmpld(CCR0, R1_SP, R0);
+  blt_predict_taken(CCR0, no_reserved_zone_enabling);
+
+  // Enable reserved zone again, throw stack overflow exception.
+  push_frame_reg_args(0, R0);
+  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
+  pop_frame();
+  mtlr(return_pc);
+  load_const_optimized(R0, StubRoutines::throw_delayed_StackOverflowError_entry());
+  mtctr(R0);
+  bctr();
+
+  should_not_reach_here();
+
+  bind(no_reserved_zone_enabling);
+}
+
 // CmpxchgX sets condition register to cmpX(current, compare).
 void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
                               Register compare_value, Register exchange_value,
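
In C-like terms, the sequence emitted by reserved_stack_check reduces to one comparison against a thread-local watermark. A stand-alone mimic follows (a sketch with a hypothetical stand-in type; the interpreter variant in interp_masm_ppc_64.cpp compares the caller's frame pointer instead of R1_SP, for the reason given in its comment):

    #include <cstdint>

    struct MockThread {                        // hypothetical stand-in for JavaThread
      uintptr_t reserved_stack_activation;     // watermark; the stack grows downward
    };

    // Mirrors the emitted cmpld/blt pair: when the returning frame sits at or
    // above the recorded activation, the reserved pages must be re-protected
    // (enable_stack_reserved_zone) and the delayed StackOverflowError thrown
    // through the new stub entry.
    bool needs_reserved_zone_enabling(const MockThread* t, uintptr_t sp) {
      return sp >= t->reserved_stack_activation;
    }

    int main() {
      MockThread t = { 0x7000 };
      return needs_reserved_zone_enabling(&t, 0x8000) ? 0 : 1;  // zone was used
    }
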
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Fri Jun 10 15:13:37 2016 -0700
@@ -411,6 +411,10 @@
   // stdux, return the banged address. Otherwise, return 0.
   static address get_stack_bang_address(int instruction, void* ucontext);
 
+  // Check for reserved stack access in method being exited. If the reserved
+  // stack area was accessed, protect it again and throw StackOverflowError.
+  void reserved_stack_check(Register return_pc);
+
   // Atomics
   // CmpxchgX sets condition register to cmpX(current, compare).
   // (flag == ne) => (dest_current_value != compare_value), (!swapped)
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Fri Jun 10 15:13:37 2016 -0700
@@ -1432,7 +1432,7 @@
 
   const bool method_needs_polling = do_polling() && C->is_method_compilation();
   const bool method_is_frameless  = false /* TODO: PPC port C->is_frameless_method()*/;
-  const Register return_pc        = R11;
+  const Register return_pc        = R31;  // Must survive C-call to enable_stack_reserved_zone().
   const Register polling_page     = R12;
 
   if (!method_is_frameless) {
@@ -1456,6 +1456,10 @@
     __ addi(R1_SP, R1_SP, (int)framesize);
   }
 
+  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
+    __ reserved_stack_check(return_pc);
+  }
+
   if (method_needs_polling) {
     // We need to mark the code position where the load from the safepoint
     // polling page was emitted as relocInfo::poll_return_type here.
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -3082,6 +3082,9 @@
     StubRoutines::_throw_StackOverflowError_entry   =
       generate_throw_exception("StackOverflowError throw_exception",
                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
+    StubRoutines::_throw_delayed_StackOverflowError_entry =
+      generate_throw_exception("delayed StackOverflowError throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);
 
     // CRC32 Intrinsics.
     if (UseCRC32Intrinsics) {
--- a/hotspot/src/os/aix/vm/os_aix.hpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/os/aix/vm/os_aix.hpp	Fri Jun 10 15:13:37 2016 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2013, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -128,6 +128,8 @@
   // Set PC into context. Needed for continuation after signal.
   static void ucontext_set_pc(ucontext_t* uc, address pc);
 
+  static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr);
+
   // This boolean allows users to forward their own non-matching signals
   // to JVM_handle_aix_signal, harmlessly.
   static bool signal_handlers_are_installed;
--- a/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
  */
 
 // no precompiled headers
-#include "assembler_ppc.inline.hpp"
+#include "asm/assembler.inline.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -145,6 +145,41 @@
   return fr;
 }
 
+bool os::Aix::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Aix::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // Interpreter performs stack banging after the fixed frame header has
+    // been generated while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, the
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_context(uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // More complex handling for compiled code.
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fall back to default
+      // stack overflow handling. In compiled code, we bang before
+      // the frame is complete.
+      return false;
+    } else {
+      intptr_t* sp = os::Aix::ucontext_get_sp(uc);
+      *fr = frame(sp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        assert(!fr->is_first_frame(), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   if (*fr->sp() == NULL) {
     // fr is the last C frame
@@ -246,14 +281,32 @@
       // to continue with yellow zone disabled, but that doesn't buy us much and prevents
       // hs_err_pid files.
       if (thread->in_stack_yellow_reserved_zone(addr)) {
-        thread->disable_stack_yellow_reserved_zone();
         if (thread->thread_state() == _thread_in_Java) {
+            if (thread->in_stack_reserved_zone(addr)) {
+              frame fr;
+              if (os::Aix::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+                assert(fr.is_java_frame(), "Must be a Java frame");
+                frame activation =
+                  SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+                if (activation.sp() != NULL) {
+                  thread->disable_stack_reserved_zone();
+                  if (activation.is_interpreted_frame()) {
+                    thread->set_reserved_stack_activation((address)activation.fp());
+                  } else {
+                    thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                  }
+                  return 1;
+                }
+              }
+            }
           // Throw a stack overflow exception.
           // Guard pages will be reenabled while unwinding the stack.
+          thread->disable_stack_yellow_reserved_zone();
           stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
           goto run_stub;
         } else {
           // Thread was in the vm or native code. Return and try to finish.
+          thread->disable_stack_yellow_reserved_zone();
           return 1;
         }
       } else if (thread->in_stack_red_zone(addr)) {
--- a/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
  */
 
 // no precompiled headers
-#include "assembler_ppc.inline.hpp"
+#include "asm/assembler.inline.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -157,6 +157,42 @@
   return frame(sp, epc.pc());
 }
 
+bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+  address pc = (address) os::Linux::ucontext_get_pc(uc);
+  if (Interpreter::contains(pc)) {
+    // Interpreter performs stack banging after the fixed frame header has
+    // been generated while the compilers perform it before. To maintain
+    // semantic consistency between interpreted and compiled frames, the
+    // method returns the Java sender of the current frame.
+    *fr = os::fetch_frame_from_context(uc);
+    if (!fr->is_first_java_frame()) {
+      assert(fr->safe_for_sender(thread), "Safety check");
+      *fr = fr->java_sender();
+    }
+  } else {
+    // More complex handling for compiled code.
+    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+    CodeBlob* cb = CodeCache::find_blob(pc);
+    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+      // Not sure where the pc points to, fall back to default
+      // stack overflow handling. In compiled code, we bang before
+      // the frame is complete.
+      return false;
+    } else {
+      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
+      intptr_t* sp = os::Linux::ucontext_get_sp(uc);
+      *fr = frame(sp, (address)*sp);
+      if (!fr->is_java_frame()) {
+        assert(fr->safe_for_sender(thread), "Safety check");
+        assert(!fr->is_first_frame(), "Safety check");
+        *fr = fr->java_sender();
+      }
+    }
+  }
+  assert(fr->is_java_frame(), "Safety check");
+  return true;
+}
+
 frame os::get_sender_for_C_frame(frame* fr) {
   if (*fr->sp() == 0) {
     // fr is the last C frame
@@ -243,13 +279,31 @@
       if (thread->on_local_stack(addr)) {
         // stack overflow
         if (thread->in_stack_yellow_reserved_zone(addr)) {
-          thread->disable_stack_yellow_reserved_zone();
           if (thread->thread_state() == _thread_in_Java) {
+            if (thread->in_stack_reserved_zone(addr)) {
+              frame fr;
+              if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) {
+                assert(fr.is_java_frame(), "Must be a Java frame");
+                frame activation =
+                  SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
+                if (activation.sp() != NULL) {
+                  thread->disable_stack_reserved_zone();
+                  if (activation.is_interpreted_frame()) {
+                    thread->set_reserved_stack_activation((address)activation.fp());
+                  } else {
+                    thread->set_reserved_stack_activation((address)activation.unextended_sp());
+                  }
+                  return 1;
+                }
+              }
+            }
             // Throw a stack overflow exception.
             // Guard pages will be reenabled while unwinding the stack.
+            thread->disable_stack_yellow_reserved_zone();
             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
           } else {
             // Thread was in the vm or native code. Return and try to finish.
+            thread->disable_stack_yellow_reserved_zone();
             return 1;
           }
         } else if (thread->in_stack_red_zone(addr)) {
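
Both signal handlers (the AIX one above and this Linux one) get the same restructuring: disabling the yellow zone is deferred until the reserved-zone path has had a chance to claim the fault. A stand-alone mimic of the new decision tree (a sketch with hypothetical stand-in types; the real handler works on JavaThread, ucontext_t and frames via SharedRuntime):

    enum Action { RETRY, THROW_SOE, UNHANDLED };

    struct Fault {
      bool in_yellow_reserved;    // thread->in_stack_yellow_reserved_zone(addr)
      bool in_reserved;           // thread->in_stack_reserved_zone(addr)
      bool in_java;               // thread_state() == _thread_in_Java
      bool annotated_activation;  // look_for_reserved_stack_annotated_method found one
    };

    Action handle(const Fault& f) {
      if (!f.in_yellow_reserved) return UNHANDLED;
      if (f.in_java) {
        if (f.in_reserved && f.annotated_activation) {
          // disable_stack_reserved_zone(); set_reserved_stack_activation(...)
          return RETRY;           // reserved zone claims the fault, no exception yet
        }
        // disable_stack_yellow_reserved_zone(); -- now done only on this path
        return THROW_SOE;
      }
      // disable_stack_yellow_reserved_zone(); -- VM/native code finishes on its own
      return RETRY;
    }

    int main() { return handle({true, false, true, false}) == THROW_SOE ? 0 : 1; }
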
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -67,6 +67,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/synchronizer.hpp"
 #include "utilities/growableArray.hpp"
@@ -76,6 +77,11 @@
 #include "trace/tracing.hpp"
 #endif
 
+// helper function to avoid in-line casts
+template <typename T> static T* load_ptr_acquire(T* volatile *p) {
+  return static_cast<T*>(OrderAccess::load_ptr_acquire(p));
+}
+
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@@ -147,20 +153,23 @@
 }
 
 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::classes_do(void f(Klass * const)) {
+  assert_locked_or_safepoint(_metaspace_lock);
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     f(k);
   }
 }
 
 void ClassLoaderData::methods_do(void f(Method*)) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
@@ -179,7 +188,8 @@
 }
 
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
@@ -188,6 +198,7 @@
 }
 
 void ClassLoaderData::modules_do(void f(ModuleEntry*)) {
+  assert_locked_or_safepoint(Module_lock);
   if (_modules != NULL) {
     for (int i = 0; i < _modules->table_size(); i++) {
       for (ModuleEntry* entry = _modules->bucket(i);
@@ -200,9 +211,11 @@
 }
 
 void ClassLoaderData::packages_do(void f(PackageEntry*)) {
-  if (_packages != NULL) {
-    for (int i = 0; i < _packages->table_size(); i++) {
-      for (PackageEntry* entry = _packages->bucket(i);
+  // Lock-free access requires load_ptr_acquire
+  PackageEntryTable* packages = load_ptr_acquire(&_packages);
+  if (packages != NULL) {
+    for (int i = 0; i < packages->table_size(); i++) {
+      for (PackageEntry* entry = packages->bucket(i);
                               entry != NULL;
                               entry = entry->next()) {
         f(entry);
@@ -325,10 +338,9 @@
     MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     Klass* old_value = _klasses;
     k->set_next_link(old_value);
-    // Make sure linked class is stable, since the class list is walked without a lock
-    OrderAccess::storestore();
-    // link the new item into the list
-    _klasses = k;
+    // Link the new item into the list, making sure the linked class is stable
+    // since the list can be walked without a lock
+    OrderAccess::release_store_ptr(&_klasses, k);
   }
 
   if (publicize && k->class_loader_data() != NULL) {
@@ -343,11 +355,10 @@
   }
 }
 
-// This is called by InstanceKlass::deallocate_contents() to remove the
-// scratch_class for redefine classes.  We need a lock because there it may not
-// be called at a safepoint if there's an error.
+// Remove a klass from the _klasses list for scratch_class during redefinition
+// or parsed class in the case of an error.
 void ClassLoaderData::remove_class(Klass* scratch_class) {
-  MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
+  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
   Klass* prev = NULL;
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
     if (k == scratch_class) {
@@ -390,42 +401,46 @@
 
 PackageEntryTable* ClassLoaderData::packages() {
   // Lazily create the package entry table at first request.
-  if (_packages == NULL) {
+  // Lock-free access requires load_ptr_acquire.
+  PackageEntryTable* packages = load_ptr_acquire(&_packages);
+  if (packages == NULL) {
     MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     // Check if _packages got allocated while we were waiting for this lock.
-    if (_packages == NULL) {
-      _packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
+    if ((packages = _packages) == NULL) {
+      packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
+      // Ensure _packages is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_packages, packages);
     }
   }
-  return _packages;
+  return packages;
 }
 
 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
-  if (_modules == NULL) {
+  // Lock-free access requires load_ptr_acquire.
+  ModuleEntryTable* modules = load_ptr_acquire(&_modules);
+  if (modules == NULL) {
     MutexLocker m1(Module_lock);
-    // Check again if _modules has been allocated while we were getting this lock.
-    if (_modules != NULL) {
-      return _modules;
-    }
+    // Check if _modules got allocated while we were waiting for this lock.
+    if ((modules = _modules) == NULL) {
+      modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
+      // Each loader has one unnamed module entry. Create it before
+      // any classes, loaded by this loader, are defined in case
+      // they end up being defined in loader's unnamed module.
+      modules->create_unnamed_module(this);
 
-    ModuleEntryTable* temp_table = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
-    // Each loader has one unnamed module entry. Create it before
-    // any classes, loaded by this loader, are defined in case
-    // they end up being defined in loader's unnamed module.
-    temp_table->create_unnamed_module(this);
-
-    {
-      MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
-      // Ensure _modules is stable, since it is examined without a lock
-      OrderAccess::storestore();
-      _modules = temp_table;
+      {
+        MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
+        // Ensure _modules is stable, since it is examined without a lock
+        OrderAccess::release_store_ptr(&_modules, modules);
+      }
     }
   }
-  return _modules;
+  return modules;
 }
 
 oop ClassLoaderData::keep_alive_object() const {
+  assert_locked_or_safepoint(_metaspace_lock);
   assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
   return is_anonymous() ? _klasses->java_mirror() : class_loader();
 }
@@ -499,30 +514,33 @@
   // to create smaller arena for Reflection class loaders also.
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
-  if (_metaspace == NULL) {
-    MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
-    // Check again if metaspace has been allocated while we were getting this lock.
-    if (_metaspace != NULL) {
-      return _metaspace;
-    }
-    if (this == the_null_class_loader_data()) {
-      assert (class_loader() == NULL, "Must be");
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType));
-    } else if (is_anonymous()) {
-      if (class_loader() != NULL) {
-        log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
+  // Lock-free access requires load_ptr_acquire.
+  Metaspace* metaspace = load_ptr_acquire(&_metaspace);
+  if (metaspace == NULL) {
+    MutexLockerEx ml(_metaspace_lock,  Mutex::_no_safepoint_check_flag);
+    // Check if _metaspace got allocated while we were waiting for this lock.
+    if ((metaspace = _metaspace) == NULL) {
+      if (this == the_null_class_loader_data()) {
+        assert (class_loader() == NULL, "Must be");
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
+      } else if (is_anonymous()) {
+        if (class_loader() != NULL) {
+          log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
+        }
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType);
+      } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
+        if (class_loader() != NULL) {
+          log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
+        }
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
+      } else {
+        metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
       }
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType));
-    } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
-      if (class_loader() != NULL) {
-        log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
-      }
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType));
-    } else {
-      set_metaspace(new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType));
+      // Ensure _metaspace is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_metaspace, metaspace);
     }
   }
-  return _metaspace;
+  return metaspace;
 }
 
 JNIHandleBlock* ClassLoaderData::handles() const           { return _handles; }
@@ -638,6 +656,7 @@
 #endif // PRODUCT
 
 void ClassLoaderData::verify() {
+  assert_locked_or_safepoint(_metaspace_lock);
   oop cl = class_loader();
 
   guarantee(this == class_loader_data(cl) || is_anonymous(), "Must be the same");
@@ -656,7 +675,8 @@
 }
 
 bool ClassLoaderData::contains_klass(Klass* klass) {
-  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_ptr_acquire
+  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k == klass) return true;
   }
   return false;
@@ -1046,6 +1066,7 @@
 
   // Find the first klass in the CLDG.
   while (cld != NULL) {
+    assert_locked_or_safepoint(cld->metaspace_lock());
     klass = cld->_klasses;
     if (klass != NULL) {
       _next_klass = klass;
@@ -1063,6 +1084,7 @@
 
   // No more klasses in the current CLD. Time to find a new CLD.
   ClassLoaderData* cld = klass->class_loader_data();
+  assert_locked_or_safepoint(cld->metaspace_lock());
   while (next == NULL) {
     cld = cld->next();
     if (cld == NULL) {
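
Every lazily created field in this file (_packages, _modules, _metaspace, and _oop_map_cache in instanceKlass.cpp below) now follows the same pattern: load_ptr_acquire on the lock-free read, release_store_ptr to publish, replacing the bare OrderAccess::storestore(). A self-contained C++11 analogue of the pattern (a sketch; HotSpot uses OrderAccess and MutexLockerEx rather than std::atomic and std::mutex):

    #include <atomic>
    #include <mutex>

    struct Table { int dummy = 0; };   // stand-in for PackageEntryTable etc.

    class Loader {
      std::atomic<Table*> _table{nullptr};
      std::mutex          _lock;
    public:
      Table* table() {
        // Lock-free fast path: this acquire pairs with the release below,
        // so a reader that sees the pointer also sees the initialized Table.
        Table* t = _table.load(std::memory_order_acquire);
        if (t == nullptr) {
          std::lock_guard<std::mutex> g(_lock);
          // Check if another thread allocated it while we waited for the lock.
          if ((t = _table.load(std::memory_order_relaxed)) == nullptr) {
            t = new Table();
            // Publish only after construction is complete.
            _table.store(t, std::memory_order_release);
          }
        }
        return t;
      }
    };

    int main() { Loader l; return l.table() == l.table() ? 0 : 1; }
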
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp	Fri Jun 10 15:13:37 2016 -0700
@@ -171,8 +171,8 @@
   Dependencies _dependencies; // holds dependencies from this class loader
                               // data to others.
 
-  Metaspace * _metaspace;  // Meta-space where meta-data defined by the
-                           // classes in the class loader are allocated.
+  Metaspace * volatile _metaspace;  // Meta-space where meta-data defined by the
+                                    // classes in the class loader are allocated.
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
   bool _is_anonymous;      // if this CLD is for an anonymous class
@@ -186,9 +186,9 @@
   JNIHandleBlock* _handles; // Handles to constant pool arrays, Modules, etc, which
                             // have the same life cycle of the corresponding ClassLoader.
 
-  Klass* _klasses;         // The classes defined by the class loader.
-  PackageEntryTable* _packages; // The packages defined by the class loader.
-  ModuleEntryTable* _modules;   // The modules defined by the class loader.
+  Klass* volatile _klasses;              // The classes defined by the class loader.
+  PackageEntryTable* volatile _packages; // The packages defined by the class loader.
+  ModuleEntryTable* volatile _modules;   // The modules defined by the class loader.
 
   // These method IDs are created for the class loader and set to NULL when the
   // class loader is unloaded.  They are rarely freed, only for redefine classes
@@ -216,8 +216,6 @@
   ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
   ~ClassLoaderData();
 
-  void set_metaspace(Metaspace* m) { _metaspace = m; }
-
   JNIHandleBlock* handles() const;
   void set_handles(JNIHandleBlock* handles);
 
--- a/hotspot/src/share/vm/classfile/packageEntry.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/classfile/packageEntry.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -34,15 +34,13 @@
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/ostream.hpp"
 
-// Return true if this package is exported to m.
+// Returns true if this package specifies m as a qualified export, including through an unnamed export
 bool PackageEntry::is_qexported_to(ModuleEntry* m) const {
   assert(m != NULL, "No module to lookup in this package's qualified exports list");
   MutexLocker m1(Module_lock);
-  if (!_is_exported) {
-    return false;
-  } else if (_is_exported_allUnnamed && !m->is_named()) {
+  if (is_exported_allUnnamed() && !m->is_named()) {
     return true;
-  } else if (_qualified_exports == NULL) {
+  } else if (!has_qual_exports_list()) {
     return false;
   } else {
     return _qualified_exports->contains(m);
@@ -52,8 +50,7 @@
 // Add a module to the package's qualified export list.
 void PackageEntry::add_qexport(ModuleEntry* m) {
   assert_locked_or_safepoint(Module_lock);
-  assert(_is_exported == true, "Adding a qualified export to a package that is not exported");
-  if (_qualified_exports == NULL) {
+  if (!has_qual_exports_list()) {
     // Lazily create a package's qualified exports list.
     // Initial size is small, do not anticipate export lists to be large.
     _qualified_exports =
@@ -62,7 +59,7 @@
   _qualified_exports->append_if_missing(m);
 }
 
-// Set the package's exported state based on the value of the ModuleEntry.
+// Set the package's exported states based on the value of the ModuleEntry.
 void PackageEntry::set_exported(ModuleEntry* m) {
   MutexLocker m1(Module_lock);
   if (is_unqual_exported()) {
@@ -73,7 +70,7 @@
 
   if (m == NULL) {
     // NULL indicates the package is being unqualifiedly exported
-    if (_is_exported && _qualified_exports != NULL) {
+    if (has_qual_exports_list()) {
       // Legit to transition a package from being qualifiedly exported
       // to unqualified.  Clean up the qualified lists at the next
       // safepoint.
@@ -85,11 +82,17 @@
 
   } else {
     // Add the exported module
-    _is_exported = true;
     add_qexport(m);
   }
 }
 
+void PackageEntry::set_is_exported_allUnnamed() {
+  MutexLocker m1(Module_lock);
+  if (!is_unqual_exported()) {
+   _is_exported_allUnnamed = true;
+  }
+}
+
 // Remove dead module entries within the package's exported list.
 void PackageEntry::purge_qualified_exports() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
@@ -170,7 +173,7 @@
   if (!module->is_named()) {
     // Set the exported state to true because all packages
     // within the unnamed module are unqualifiedly exported
-    entry->set_exported(true);
+    entry->set_unqual_exported();
   }
   entry->set_module(module);
   return entry;
@@ -248,6 +251,20 @@
 
 }
 
+// iteration of qualified exports
+void PackageEntry::package_exports_do(ModuleClosure* const f) {
+  assert_locked_or_safepoint(Module_lock);
+  assert(f != NULL, "invariant");
+
+  if (has_qual_exports_list()) {
+    int qe_len = _qualified_exports->length();
+
+    for (int i = 0; i < qe_len; ++i) {
+      f->do_module(_qualified_exports->at(i));
+    }
+  }
+}
+
 // Remove dead entries from all packages' exported list
 void PackageEntryTable::purge_all_package_exports() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
@@ -281,10 +298,10 @@
 void PackageEntry::print(outputStream* st) {
   ResourceMark rm;
   st->print_cr("package entry "PTR_FORMAT" name %s module %s classpath_index "
-               INT32_FORMAT " is_exported %d is_exported_allUnnamed %d " "next "PTR_FORMAT,
+               INT32_FORMAT " is_exported_unqualified %d is_exported_allUnnamed %d " "next "PTR_FORMAT,
                p2i(this), name()->as_C_string(),
                (module()->is_named() ? module()->name()->as_C_string() : UNNAMED_MODULE),
-               _classpath_index, _is_exported, _is_exported_allUnnamed, p2i(next()));
+               _classpath_index, _is_exported_unqualified, _is_exported_allUnnamed, p2i(next()));
 }
 
 void PackageEntryTable::verify() {
@@ -305,17 +322,3 @@
 void PackageEntry::verify() {
   guarantee(name() != NULL, "A package entry must have a corresponding symbol name.");
 }
-
-// iteration of qualified exports
-void PackageEntry::package_exports_do(ModuleClosure* const f) {
-  assert_locked_or_safepoint(Module_lock);
-  assert(f != NULL, "invariant");
-
-  if (is_qual_exported()) {
-    int qe_len = _qualified_exports->length();
-
-    for (int i = 0; i < qe_len; ++i) {
-      f->do_module(_qualified_exports->at(i));
-    }
-  }
-}
--- a/hotspot/src/share/vm/classfile/packageEntry.hpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/classfile/packageEntry.hpp	Fri Jun 10 15:13:37 2016 -0700
@@ -34,16 +34,32 @@
 // A PackageEntry basically represents a Java package.  It contains:
 //   - Symbol* containing the package's name.
 //   - ModuleEntry* for this package's containing module.
-//   - a flag indicating if package is exported, either qualifiedly or
-//     unqualifiedly.
+//   - a flag indicating if package is exported unqualifiedly
 //   - a flag indicating if this package is exported to all unnamed modules.
 //   - a growable array containing other module entries that this
 //     package is exported to.
 //
-// Packages that are:
-//   - not exported:        _qualified_exports = NULL  && _is_exported is false
-//   - qualified exports:   (_qualified_exports != NULL || _is_exported_allUnnamed is true) && _is_exported is true
-//   - unqualified exports: (_qualified_exports = NULL && _is_exported_allUnnamed is false) && _is_exported is true
+// Packages can be exported in the following 3 ways:
+//   - not exported:        the package has not been explicitly qualified to a
+//                            particular module nor has it been specified to be
+//                            unqualifiedly exported to all modules. If all states
+//                            of exportedness are false, the package is considered
+//                            not exported.
+//   - qualified exports:   the package has been explicitly qualified to at least
+//                            one particular module or has been qualifiedly exported
+//                            to all unnamed modules.
+//                            Note: _is_exported_allUnnamed is a form of a qualified
+//                            export. It is equivalent to the package being
+//                            explicitly exported to all current and future unnamed modules.
+//   - unqualified exports: the package is exported to all modules.
+//
+// A package can transition from:
+//   - being not exported, to being exported either in a qualified or unqualified manner
+//   - being qualifiedly exported, to unqualifiedly exported. Its exported scope is widened.
+//
+// A package cannot transition from:
+//   - being unqualifiedly exported, to exported qualifiedly to a specific module.
+//       This transition attempt is silently ignored in set_exported.
 //
 // The Mutex Module_lock is shared between ModuleEntry and PackageEntry, to lock either
 // data structure.
@@ -55,7 +71,7 @@
   // loaded by the boot loader from -Xbootclasspath/a in an unnamed module, it
   // indicates from which class path entry.
   s2 _classpath_index;
-  bool _is_exported;
+  bool _is_exported_unqualified;
   bool _is_exported_allUnnamed;
   GrowableArray<ModuleEntry*>* _exported_pending_delete; // transitioned from qualified to unqualified, delete at safepoint
   GrowableArray<ModuleEntry*>* _qualified_exports;
@@ -68,7 +84,7 @@
   void init() {
     _module = NULL;
     _classpath_index = -1;
-    _is_exported = false;
+    _is_exported_unqualified = false;
     _is_exported_allUnnamed = false;
     _exported_pending_delete = NULL;
     _qualified_exports = NULL;
@@ -83,34 +99,41 @@
   void               set_module(ModuleEntry* m) { _module = m; }
 
   // package's export state
-  bool is_exported() const { return _is_exported; } // qualifiedly or unqualifiedly exported
+  bool is_exported() const { // qualifiedly or unqualifiedly exported
+      return (is_unqual_exported() || has_qual_exports_list() || is_exported_allUnnamed());
+  }
+  // Returns true if the package has any explicit qualified exports or is exported to all unnamed
   bool is_qual_exported() const {
-    return (_is_exported && (_qualified_exports != NULL || _is_exported_allUnnamed));
+    return (has_qual_exports_list() || is_exported_allUnnamed());
+  }
+  // Returns true if there are any explicit qualified exports
+  bool has_qual_exports_list() const {
+    assert(!(_qualified_exports != NULL && _is_exported_unqualified),
+           "_qualified_exports set at same time as _is_exported_unqualified");
+    return (_qualified_exports != NULL);
+  }
+  bool is_exported_allUnnamed() const {
+    assert(!(_is_exported_allUnnamed && _is_exported_unqualified),
+           "_is_exported_allUnnamed set at same time as _is_exported_unqualified");
+    return _is_exported_allUnnamed;
   }
   bool is_unqual_exported() const {
-    return (_is_exported && (_qualified_exports == NULL && !_is_exported_allUnnamed));
+    assert(!(_qualified_exports != NULL && _is_exported_unqualified),
+           "_qualified_exports set at same time as _is_exported_unqualified");
+    assert(!(_is_exported_allUnnamed && _is_exported_unqualified),
+           "_is_exported_allUnnamed set at same time as _is_exported_unqualified");
+    return _is_exported_unqualified;
   }
   void set_unqual_exported() {
-    _is_exported = true;
+    _is_exported_unqualified = true;
     _is_exported_allUnnamed = false;
     _qualified_exports = NULL;
   }
   bool exported_pending_delete() const     { return (_exported_pending_delete != NULL); }
 
-  void set_exported(bool e)                { _is_exported = e; }
   void set_exported(ModuleEntry* m);
 
-  void set_is_exported_allUnnamed() {
-    if (!is_unqual_exported()) {
-     _is_exported_allUnnamed = true;
-     _is_exported = true;
-    }
-  }
-  bool is_exported_allUnnamed() const {
-    assert(_is_exported || !_is_exported_allUnnamed,
-           "is_allUnnamed set without is_exported being set");
-    return _is_exported_allUnnamed;
-  }
+  void set_is_exported_allUnnamed();
 
   void set_classpath_index(s2 classpath_index) {
     _classpath_index = classpath_index;
@@ -122,7 +145,7 @@
   // returns true if the package is defined in the unnamed module
   bool in_unnamed_module() const  { return !_module->is_named(); }
 
-  // returns true if the package specifies m as a qualified export
+  // returns true if the package specifies m as a qualified export, including through an unnamed export
   bool is_qexported_to(ModuleEntry* m) const;
 
   // add the module to the package's qualified exports
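
The three export states and their legal transitions can be exercised with a small stand-alone mimic of the new flag scheme (a sketch; PackageEntry itself also takes Module_lock and defers list deletion via _exported_pending_delete):

    #include <cassert>
    #include <vector>

    struct MockPackage {                        // hypothetical stand-in for PackageEntry
      bool unqual = false;                      // _is_exported_unqualified
      bool all_unnamed = false;                 // _is_exported_allUnnamed
      std::vector<int>* qexports = nullptr;     // _qualified_exports (module ids)

      bool is_exported() const { return unqual || all_unnamed || qexports != nullptr; }
      void add_qexport(int m) {                 // qualified export
        if (qexports == nullptr) qexports = new std::vector<int>();
        qexports->push_back(m);
      }
      void set_unqual_exported() {              // widen; leaks here -- HotSpot defers
        unqual = true; all_unnamed = false;     // deletion via _exported_pending_delete
        qexports = nullptr;
      }
      void set_is_exported_allUnnamed() {       // no-op once unqualifiedly exported
        if (!unqual) all_unnamed = true;
      }
    };

    int main() {
      MockPackage p;                            // not exported
      assert(!p.is_exported());
      p.add_qexport(1);                         // now qualifiedly exported
      p.set_unqual_exported();                  // legal widening
      p.set_is_exported_allUnnamed();           // silently ignored
      assert(p.unqual && !p.all_unnamed && p.qexports == nullptr);
    }
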
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -966,20 +966,18 @@
   methodHandle resolved_method = linktime_resolve_static_method(link_info, CHECK);
 
   // The resolved class can change as a result of this resolution.
-  KlassHandle resolved_klass = KlassHandle(THREAD, resolved_method->method_holder());
+  KlassHandle resolved_klass(THREAD, resolved_method->method_holder());
 
-  Method* save_resolved_method = resolved_method();
   // Initialize klass (this should only happen if everything is ok)
   if (initialize_class && resolved_klass->should_be_initialized()) {
     resolved_klass->initialize(CHECK);
-    // Use updated LinkInfo (to reresolve with resolved_klass as method_holder?)
+    // Use updated LinkInfo to reresolve with resolved method holder
     LinkInfo new_info(resolved_klass, link_info.name(), link_info.signature(),
                       link_info.current_klass(),
                       link_info.check_access() ? LinkInfo::needs_access_check : LinkInfo::skip_access_check);
     resolved_method = linktime_resolve_static_method(new_info, CHECK);
   }
 
-  assert(save_resolved_method == resolved_method(), "does this change?");
   // setup result
   result.set_static(resolved_klass, resolved_method, CHECK);
 }
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -191,7 +191,12 @@
     case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
     case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
     case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
-    case Bytecodes::_iload:         *bcs.bcp() = Bytecodes::_nofast_iload;         break;
+    case Bytecodes::_iload: {
+      if (!bcs.is_wide()) {
+        *bcs.bcp() = Bytecodes::_nofast_iload;
+      }
+      break;
+    }
     default: break;
     }
   }
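
The guard matters because iload may carry the wide prefix (0xC4), giving it a two-byte local index; the nofast rewrite is only defined for the narrow one-byte form. A stand-alone sketch over a handcrafted stream (the NOFAST_ILOAD value is illustrative, and the cursor logic assumes only iload forms appear, unlike HotSpot's BytecodeStream):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t WIDE = 0xC4, ILOAD = 0x15;  // opcodes per the JVM spec
      const uint8_t NOFAST_ILOAD = 0xE9;        // illustrative value, not the real one
      // Stream: "iload 1" then "wide iload 256".
      uint8_t code[] = { ILOAD, 1, WIDE, ILOAD, 0x01, 0x00 };

      for (size_t i = 0; i < sizeof(code); ) {
        bool is_wide = (code[i] == WIDE);
        size_t op = is_wide ? i + 1 : i;        // opcode byte, past any wide prefix
        if (code[op] == ILOAD && !is_wide) {
          code[op] = NOFAST_ILOAD;              // narrow form only: safe to rewrite
        }
        i = op + (is_wide ? 3 : 2);             // skip opcode + 2-byte or 1-byte index
      }
      for (size_t i = 0; i < sizeof(code); i++) std::printf("%02X ", code[i]);
      std::printf("\n");                        // E9 01 C4 15 01 00
    }
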
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -1104,21 +1104,21 @@
 
 void InstanceKlass::mask_for(const methodHandle& method, int bci,
   InterpreterOopMap* entry_for) {
-  // Dirty read, then double-check under a lock.
-  if (_oop_map_cache == NULL) {
-    // Otherwise, allocate a new one.
+  // Lazily create the _oop_map_cache at first request
+  // Lock-free access requires load_ptr_acquire.
+  OopMapCache* oop_map_cache =
+      static_cast<OopMapCache*>(OrderAccess::load_ptr_acquire(&_oop_map_cache));
+  if (oop_map_cache == NULL) {
     MutexLocker x(OopMapCacheAlloc_lock);
-    // First time use. Allocate a cache in C heap
-    if (_oop_map_cache == NULL) {
-      // Release stores from OopMapCache constructor before assignment
-      // to _oop_map_cache. C++ compilers on ppc do not emit the
-      // required memory barrier only because of the volatile
-      // qualifier of _oop_map_cache.
-      OrderAccess::release_store_ptr(&_oop_map_cache, new OopMapCache());
+    // Check if _oop_map_cache was allocated while we were waiting for this lock
+    if ((oop_map_cache = _oop_map_cache) == NULL) {
+      oop_map_cache = new OopMapCache();
+      // Ensure _oop_map_cache is stable, since it is examined without a lock
+      OrderAccess::release_store_ptr(&_oop_map_cache, oop_map_cache);
     }
   }
-  // _oop_map_cache is constant after init; lookup below does is own locking.
-  _oop_map_cache->lookup(method, bci, entry_for);
+  // _oop_map_cache is constant after init; lookup below does its own locking.
+  oop_map_cache->lookup(method, bci, entry_for);
 }
 
 
--- a/hotspot/src/share/vm/services/virtualMemoryTracker.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/services/virtualMemoryTracker.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -23,7 +23,10 @@
  */
 #include "precompiled.hpp"
 
+#include "runtime/atomic.inline.hpp"
+#include "runtime/os.hpp"
 #include "runtime/threadCritical.hpp"
+#include "services/memTracker.hpp"
 #include "services/virtualMemoryTracker.hpp"
 
 size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
@@ -52,46 +55,41 @@
   if (all_committed()) return true;
 
   CommittedMemoryRegion committed_rgn(addr, size, stack);
-  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
-  if (node != NULL) {
+  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head();
+
+  while (node != NULL) {
     CommittedMemoryRegion* rgn = node->data();
     if (rgn->same_region(addr, size)) {
       return true;
     }
 
     if (rgn->adjacent_to(addr, size)) {
-      // check if the next region covers this committed region,
-      // the regions may not be merged due to different call stacks
-      LinkedListNode<CommittedMemoryRegion>* next =
-        node->next();
-      if (next != NULL && next->data()->contain_region(addr, size)) {
-        if (next->data()->same_region(addr, size)) {
-          next->data()->set_call_stack(stack);
-        }
-        return true;
-      }
-      if (rgn->call_stack()->equals(stack)) {
+      // special case to expand prior region if there is no next region
+      LinkedListNode<CommittedMemoryRegion>* next = node->next();
+      if (next == NULL && rgn->call_stack()->equals(stack)) {
         VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
         // the two adjacent regions have the same call stack, merge them
         rgn->expand_region(addr, size);
         VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
         return true;
       }
-      VirtualMemorySummary::record_committed_memory(size, flag());
-      if (rgn->base() > addr) {
-        return _committed_regions.insert_before(committed_rgn, node) != NULL;
-      } else {
-        return _committed_regions.insert_after(committed_rgn, node) != NULL;
       }
+
+    if (rgn->overlap_region(addr, size)) {
+      // Clear a space for this region in the case it overlaps with any regions.
+      remove_uncommitted_region(addr, size);
+      break;  // commit below
     }
-    assert(rgn->contain_region(addr, size), "Must cover this region");
-    return true;
-  } else {
+    if (rgn->end() >= addr + size) {
+      break;
+    }
+    node = node->next();
+  }
+
     // New committed region
     VirtualMemorySummary::record_committed_memory(size, flag());
     return add_committed_region(committed_rgn);
   }
-}
 
 void ReservedMemoryRegion::set_all_committed(bool b) {
   if (all_committed() != b) {
@@ -175,48 +173,52 @@
       }
     }
   } else {
-    // we have to walk whole list to remove the committed regions in
-    // specified range
-    LinkedListNode<CommittedMemoryRegion>* head =
-      _committed_regions.head();
-    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
-    VirtualMemoryRegion uncommitted_rgn(addr, sz);
+    CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
+    address end = addr + sz;
 
-    while (head != NULL && !uncommitted_rgn.is_empty()) {
-      CommittedMemoryRegion* crgn = head->data();
-      // this committed region overlaps to region to uncommit
-      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
-        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
-          // find matched region, remove the node will do
-          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
+    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
+    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
+    CommittedMemoryRegion* crgn;
+
+    while (head != NULL) {
+      crgn = head->data();
+
+      if (crgn->same_region(addr, sz)) {
+        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
           _committed_regions.remove_after(prev);
           return true;
-        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
-          // this committed region contains whole uncommitted region
-          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
-          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
-        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
-          // this committed region has been uncommitted
-          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
-          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
+      }
+
+      // del_rgn contains crgn
+      if (del_rgn.contain_region(crgn->base(), crgn->size())) {
           VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
-          LinkedListNode<CommittedMemoryRegion>* tmp = head;
           head = head->next();
           _committed_regions.remove_after(prev);
-          continue;
-        } else if (crgn->contain_address(uncommitted_rgn.base())) {
-          size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
-          crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
-          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
-          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
-        } else if (uncommitted_rgn.contain_address(crgn->base())) {
-          size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
-          crgn->exclude_region(crgn->base(), toUncommitted);
-          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
-            toUncommitted);
-          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
+        continue;  // don't update head or prev
         }
+
+      // Found addr in the current crgn. There are 2 subcases:
+      if (crgn->contain_address(addr)) {
+
+        // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
+        if (crgn->contain_address(end - 1)) {
+          VirtualMemorySummary::record_uncommitted_memory(sz, flag());
+          return remove_uncommitted_region(head, addr, sz); // done!
+        } else {
+          // (2) Did not find del_rgn's end in crgn.
+          size_t size = crgn->end() - del_rgn.base();
+          crgn->exclude_region(addr, size);
+          VirtualMemorySummary::record_uncommitted_memory(size, flag());
       }
+
+      } else if (crgn->contain_address(end - 1)) {
+      // Found del_rgn's end, but not its base addr.
+        size_t size = del_rgn.end() - crgn->base();
+        crgn->exclude_region(crgn->base(), size);
+        VirtualMemorySummary::record_uncommitted_memory(size, flag());
+        return true;  // should be done if the list is sorted properly!
+      }
+
       prev = head;
       head = head->next();
     }
@@ -386,7 +388,8 @@
 
   assert(reserved_rgn != NULL, "No reserved region");
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
-  return reserved_rgn->add_committed_region(addr, size, stack);
+  bool result = reserved_rgn->add_committed_region(addr, size, stack);
+  return result;
 }
 
 bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
@@ -398,7 +401,8 @@
   ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
   assert(reserved_rgn != NULL, "No reserved region");
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
-  return reserved_rgn->remove_uncommitted_region(addr, size);
+  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
+  return result;
 }
 
 bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
@@ -488,5 +492,3 @@
 
   return true;
 }
-
-
--- a/hotspot/src/share/vm/services/virtualMemoryTracker.hpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/services/virtualMemoryTracker.hpp	Fri Jun 10 15:13:37 2016 -0700
@@ -261,8 +261,7 @@
     VirtualMemoryRegion(addr, size), _stack(stack) { }
 
   inline int compare(const CommittedMemoryRegion& rgn) const {
-    if (overlap_region(rgn.base(), rgn.size()) ||
-        adjacent_to   (rgn.base(), rgn.size())) {
+    if (overlap_region(rgn.base(), rgn.size())) {
       return 0;
     } else {
       if (base() == rgn.base()) {
--- a/hotspot/src/share/vm/services/writeableFlags.cpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/services/writeableFlags.cpp	Fri Jun 10 15:13:37 2016 -0700
@@ -93,12 +93,12 @@
 
 // set a boolean global flag
 Flag::Error WriteableFlags::set_bool_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
-  int value = true;
-
-  if (sscanf(arg, "%d", &value)) {
-    return set_bool_flag(name, value != 0, origin, err_msg);
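+  // Accept "true"/"false" in any case, or the single characters "1"/"0".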
+  if ((strcasecmp(arg, "true") == 0) || (*arg == '1' && *(arg + 1) == 0)) {
+    return set_bool_flag(name, true, origin, err_msg);
+  } else if ((strcasecmp(arg, "false") == 0) || (*arg == '0' && *(arg + 1) == 0)) {
+    return set_bool_flag(name, false, origin, err_msg);
   }
-  err_msg.print("flag value must be a boolean (1 or 0)");
+  err_msg.print("flag value must be a boolean (1/0 or true/false)");
   return Flag::WRONG_FORMAT;
 }
 
--- a/hotspot/src/share/vm/utilities/linkedlist.hpp	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/src/share/vm/utilities/linkedlist.hpp	Fri Jun 10 15:13:37 2016 -0700
@@ -259,6 +259,11 @@
 
   virtual bool remove(LinkedListNode<E>* node) {
     LinkedListNode<E>* p = this->head();
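+    // The node may be the head itself; handle that first, because the
+    // search loop below only locates a node's predecessor.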
+    if (p == node) {
+      this->set_head(p->next());
+      delete_node(node);
+      return true;
+    }
     while (p != NULL && p->next() != node) {
       p = p->next();
     }
--- a/hotspot/test/gc/TestSmallHeap.java	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/test/gc/TestSmallHeap.java	Fri Jun 10 15:13:37 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,22 +23,15 @@
 
 /**
  * @test TestSmallHeap
- * @bug 8067438
+ * @bug 8067438 8152239
  * @requires vm.gc=="null"
- * @requires (vm.opt.AggressiveOpts=="null") | (vm.opt.AggressiveOpts=="false")
- * @requires vm.compMode != "Xcomp"
- * @requires vm.opt.UseCompressedOops != false
  * @summary Verify that starting the VM with a small heap works
- * @library /testlibrary /test/lib
+ * @library /testlibrary /test/lib /test/lib/share/classes
  * @modules java.base/jdk.internal.misc
  * @modules java.management/sun.management
- * @ignore 8076621
  * @build TestSmallHeap
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseParallelGC TestSmallHeap
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseSerialGC TestSmallHeap
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseG1GC TestSmallHeap
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseConcMarkSweepGC TestSmallHeap
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestSmallHeap
  */
 
 /* Note: It would be nice to verify the minimal supported heap size (2m) here,
@@ -60,23 +53,55 @@
  * So, the expected heap size is page_size * 512.
  */
 
-import jdk.test.lib.*;
-import com.sun.management.HotSpotDiagnosticMXBean;
-import java.lang.management.ManagementFactory;
-import static jdk.test.lib.Asserts.*;
+import jdk.test.lib.Asserts;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+import java.util.LinkedList;
 
 import sun.hotspot.WhiteBox;
 
 public class TestSmallHeap {
 
-    public static void main(String[] args) {
+    public static void main(String[] args) throws Exception {
+        // Do all the work in the VM driving the test; the VM with the
+        // small heap size should do as little as possible to avoid
+        // hitting an OOME.
         WhiteBox wb = WhiteBox.getWhiteBox();
         int pageSize = wb.getVMPageSize();
         int heapBytesPerCard = 512;
         long expectedMaxHeap = pageSize * heapBytesPerCard;
-        String maxHeap
-            = ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class)
-                .getVMOption("MaxHeapSize").getValue();
-        assertEQ(Long.parseLong(maxHeap), expectedMaxHeap);
+
+        verifySmallHeapSize("-XX:+UseParallelGC", expectedMaxHeap);
+        verifySmallHeapSize("-XX:+UseSerialGC", expectedMaxHeap);
+        verifySmallHeapSize("-XX:+UseG1GC", expectedMaxHeap);
+        verifySmallHeapSize("-XX:+UseConcMarkSweepGC", expectedMaxHeap);
+    }
+
+    private static void verifySmallHeapSize(String gc, long expectedMaxHeap) throws Exception {
+        LinkedList<String> vmOptions = new LinkedList<>();
+        vmOptions.add(gc);
+        vmOptions.add("-Xmx2m");
+        vmOptions.add("-XX:+PrintFlagsFinal");
+        vmOptions.add(VerifyHeapSize.class.getName());
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(vmOptions.toArray(new String[0]));
+        OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
+        analyzer.shouldHaveExitValue(0);
+
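+        // Read MaxHeapSize from the -XX:+PrintFlagsFinal output and the
+        // actual heap size from VerifyHeapSize's stdout.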
+        long maxHeapSize = Long.parseLong(analyzer.firstMatch("MaxHeapSize.+=\\s+(\\d+)", 1));
+        long actualHeapSize = Long.parseLong(analyzer.firstMatch(VerifyHeapSize.actualMsg + "(\\d+)", 1));
+        Asserts.assertEQ(maxHeapSize, expectedMaxHeap);
+        Asserts.assertLessThanOrEqual(actualHeapSize, maxHeapSize);
     }
 }
+
+class VerifyHeapSize {
+    public static final String actualMsg = "Actual heap size: ";
+
+    public static void main(String args[]) {
+        // Avoid string concatenation
+        System.out.print(actualMsg);
+        System.out.println(Runtime.getRuntime().maxMemory());
+    }
+}
--- a/hotspot/test/gc/g1/ihop/TestIHOPErgo.java	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/test/gc/g1/ihop/TestIHOPErgo.java	Fri Jun 10 15:13:37 2016 -0700
@@ -28,6 +28,7 @@
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires vm.opt.FlightRecorder != true
  * @requires vm.opt.ExplicitGCInvokesConcurrent != true
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  * @library /testlibrary /test/lib /
  * @modules java.base/jdk.internal.misc
  * @modules java.management
--- a/hotspot/test/gc/g1/mixedgc/TestLogging.java	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/test/gc/g1/mixedgc/TestLogging.java	Fri Jun 10 15:13:37 2016 -0700
@@ -25,6 +25,7 @@
  * @test TestLogging
  * @summary Check that a mixed GC is reflected in the gc logs
  * @requires vm.gc=="G1" | vm.gc=="null"
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  * @library /testlibrary /test/lib
  * @modules java.base/jdk.internal.misc
  * @modules java.management
--- a/hotspot/test/gc/stress/TestMultiThreadStressRSet.java	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/test/gc/stress/TestMultiThreadStressRSet.java	Fri Jun 10 15:13:37 2016 -0700
@@ -33,6 +33,7 @@
  * @key stress
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires os.maxMemory > 2G
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  *
  * @summary Stress G1 Remembered Set using multiple threads
  * @modules java.base/jdk.internal.misc
--- a/hotspot/test/gc/stress/TestStressRSetCoarsening.java	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/test/gc/stress/TestStressRSetCoarsening.java	Fri Jun 10 15:13:37 2016 -0700
@@ -30,6 +30,7 @@
  * @bug 8146984 8147087
  * @requires vm.gc=="G1" | vm.gc=="null"
  * @requires os.maxMemory > 3G
+ * @requires vm.opt.MaxGCPauseMillis == "null"
  *
  * @summary Stress G1 Remembered Set by creating a lot of cross region links
  * @modules java.base/jdk.internal.misc
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/CommitOverlappingRegions.java	Fri Jun 10 15:13:37 2016 -0700
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test commits of overlapping regions of memory.
+ * @key nmt jcmd
+ * @library /testlibrary /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build   CommitOverlappingRegions
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail CommitOverlappingRegions
+ */
+
+import jdk.test.lib.*;
+import sun.hotspot.WhiteBox;
+
+public class CommitOverlappingRegions {
+    public static WhiteBox wb = WhiteBox.getWhiteBox();
+    public static void main(String args[]) throws Exception {
+        OutputAnalyzer output;
+        long size = 32 * 1024;
+        long addr = wb.NMTReserveMemory(8*size);
+
+        String pid = Long.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
+        System.out.println("Address is " + Long.toHexString(addr));
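+        // The reservation is 8 slices of 32KB = 256KB. Each new
+        // OutputAnalyzer(pb.start()) below re-runs "jcmd <pid>
+        // VM.native_memory detail" and checks the committed total
+        // reported for the "Test" memory type.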
+
+        // Start: . . . . . . . .
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=0KB)");
+
+        // Committing: * * * . . . . .
+        // Region:     * * * . . . . .
+        // Expected Total: 3 x 32KB = 96KB
+        wb.NMTCommitMemory(addr + 0*size, 3*size);
+
+        // Committing: . . . . * * * .
+        // Region:     * * * . * * * .
+        // Expected Total: 6 x 32KB = 192KB
+        wb.NMTCommitMemory(addr + 4*size, 3*size);
+
+        // Check output after first 2 commits.
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=192KB)");
+
+        // Committing: . . * * * . . .
+        // Region:     * * * * * * * .
+        // Expected Total: 7 x 32KB = 224KB
+        wb.NMTCommitMemory(addr + 2*size, 3*size);
+
+        // Check output after overlapping commit.
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=224KB)");
+
+        // Uncommitting: * * * * * * * *
+        // Region:       . . . . . . . .
+        // Expected Total: 0 x 32KB = 0KB
+        wb.NMTUncommitMemory(addr + 0*size, 8*size);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=0KB)");
+
+        // Committing: * * . . . . . .
+        // Region:     * * . . . . . .
+        // Expected Total: 2 x 32KB = 64KB
+        wb.NMTCommitMemory(addr + 0*size, 2*size);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=64KB)");
+
+        // Committing: . * * * . . . .
+        // Region:     * * * * . . . .
+        // Expected Total: 4 x 32KB = 128KB
+        wb.NMTCommitMemory(addr + 1*size, 3*size);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=128KB)");
+
+        // Uncommitting: * * * . . . . .
+        // Region:       . . . * . . . .
+        // Expected Total: 1 x 32KB = 32KB
+        wb.NMTUncommitMemory(addr + 0*size, 3*size);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=32KB)");
+
+        // Committing: . . . * * . . .
+        // Region:     . . . * * . . .
+        // Expected Total: 2 x 32KB = 64KB
+        wb.NMTCommitMemory(addr + 3*size, 2*size);
+        System.out.println("Address is " + Long.toHexString(addr + 3*size));
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=64KB)");
+
+        // Committing: . . . . * * . .
+        // Region:     . . . * * * . .
+        // Expected Total: 3 x 32KB = 96KB
+        wb.NMTCommitMemory(addr + 4*size, 2*size);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=96KB)");
+
+        // Committing: . . . . . * * .
+        // Region:     . . . * * * * .
+        // Expected Total: 4 x 32KB = 128KB
+        wb.NMTCommitMemory(addr + 5*size, 2*size);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=128KB)");
+
+        // Committing: . . . . . . * *
+        // Region:     . . . * * * * *
+        // Expected Total: 5 x 32KB = 160KB
+        wb.NMTCommitMemory(addr + 6*size, 2*size);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=160KB)");
+
+        // Uncommitting: * * * * * * * *
+        // Region:       . . . . . . . .
+        // Expected Total: 0 x 32KB = 0KB
+        wb.NMTUncommitMemory(addr + 0*size, 8*size);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB, committed=0KB)");
+    }
+}
--- a/hotspot/test/runtime/ReservedStack/ReservedStackTest.java	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/test/runtime/ReservedStack/ReservedStackTest.java	Fri Jun 10 15:13:37 2016 -0700
@@ -27,6 +27,7 @@
  * @modules java.base/jdk.internal.misc
  * @modules java.base/jdk.internal.vm.annotation
  * @build jdk.test.lib.*
+ * @run main/othervm -Xint ReservedStackTest
  * @run main/othervm -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
  */
 
@@ -196,9 +197,12 @@
             System.out.println("Test started execution at frame = " + (counter - deframe));
             String result = test.getResult();
             // The feature is not fully implemented on all platforms,
-            // corruptions are still possible
-            boolean supportedPlatform = Platform.isSolaris() || Platform.isOSX()
-                || (Platform.isLinux() && (Platform.isX86() || Platform.isX64()));
+            // corruptions are still possible.
+            boolean supportedPlatform =
+                Platform.isAix() ||
+                (Platform.isLinux() && (Platform.isPPC() || Platform.isX64() || Platform.isX86())) ||
+                Platform.isOSX() ||
+                Platform.isSolaris();
             if (supportedPlatform && !result.contains("PASSED")) {
                 System.out.println(result);
                 throw new Error(result);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/ReservedStack/ReservedStackTestCompiler.java	Fri Jun 10 15:13:37 2016 -0700
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test ReservedStackTestCompiler
+ * @summary Run ReservedStackTest with dedicated compilers C1 and C2.
+ * @requires vm.flavor == "server"
+ * @library /testlibrary
+ * @modules java.base/jdk.internal.misc
+ * @modules java.base/jdk.internal.vm.annotation
+ * @build jdk.test.lib.* ReservedStackTest
+ * @run main/othervm -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
+ * @run main/othervm -XX:-TieredCompilation                         -XX:-Inline -XX:CompileCommand=exclude,java/util/concurrent/locks/AbstractOwnableSynchronizer.setExclusiveOwnerThread ReservedStackTest
+ */
+
+// Intentionally no class body: the @run tags above execute ReservedStackTest
+// with C1 (-XX:TieredStopAtLevel=1) and with C2 (-XX:-TieredCompilation).
--- a/hotspot/test/serviceability/dcmd/vm/SetVMFlagTest.java	Wed Jul 05 21:50:08 2017 +0200
+++ b/hotspot/test/serviceability/dcmd/vm/SetVMFlagTest.java	Fri Jun 10 15:13:37 2016 -0700
@@ -56,6 +56,25 @@
         run(new JMXExecutor());
     }
 
+    private void setMutableFlagInternal(CommandExecutor executor, String flag,
+                                        boolean val, boolean isNumeric) {
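+        // Set the flag via VM.set_flag using either the numeric ("1"/"0")
+        // or the textual ("true"/"false") form, then re-read it to verify.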
+        String strFlagVal;
+        if (isNumeric) {
+            strFlagVal = val ? "1" : "0";
+        } else {
+            strFlagVal = val ? "true" : "false";
+        }
+
+        OutputAnalyzer out = executor.execute("VM.set_flag " + flag + " " + strFlagVal);
+        out.stderrShouldBeEmpty();
+
+        out = getAllFlags(executor);
+
+        String newFlagVal = out.firstMatch(MANAGEABLE_PATTERN.replace("(\\S+)", flag), 1);
+
+        // Boolean flags are reported as "true"/"false"; after the set, the
+        // flag must no longer read as the opposite of val. (Comparing against
+        // "1"/"0" would never fail, since those strings are never printed.)
+        assertNotEquals(newFlagVal, val ? "false" : "true");
+    }
+
     private void setMutableFlag(CommandExecutor executor) {
         OutputAnalyzer out = getAllFlags(executor);
         String flagName = out.firstMatch(MANAGEABLE_PATTERN, 1);
@@ -69,15 +88,8 @@
         }
 
         Boolean blnVal = Boolean.parseBoolean(flagVal);
-
-        out = executor.execute("VM.set_flag " + flagName + " " + (blnVal ? 0 : 1));
-        out.stderrShouldBeEmpty();
-
-        out = getAllFlags(executor);
-
-        String newFlagVal = out.firstMatch(MANAGEABLE_PATTERN.replace("(\\S+)", flagName), 1);
-
-        assertNotEquals(newFlagVal, flagVal);
+        setMutableFlagInternal(executor, flagName, !blnVal, true);
+        setMutableFlagInternal(executor, flagName, blnVal, false);
     }
 
     private void setMutableFlagWithInvalidValue(CommandExecutor executor) {
@@ -95,7 +107,7 @@
         // a boolean flag accepts only 0/1 as its value
         out = executor.execute("VM.set_flag " + flagName + " unexpected_value");
         out.stderrShouldBeEmpty();
-        out.stdoutShouldContain("flag value must be a boolean (1 or 0)");
+        out.stdoutShouldContain("flag value must be a boolean (1/0 or true/false)");
 
         out = getAllFlags(executor);