Merge

author     vlivanov
date       Wed, 11 May 2016 00:31:28 +0300
changeset  38287 ab815717c073
parent     38263 a7488329ad27 (current diff)
parent     38286 0ddb6f84e138 (diff)
child      38288 3430942c1dda

files:
  hotspot/src/share/vm/classfile/vmSymbols.hpp
  hotspot/src/share/vm/opto/library_call.cpp
  hotspot/src/share/vm/runtime/globals.hpp
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad	Wed May 11 00:31:28 2016 +0300
@@ -996,6 +996,7 @@
 source_hpp %{
 
 #include "gc/shared/cardTableModRefBS.hpp"
+#include "opto/addnode.hpp"
 
 class CallStubImpl {
 
@@ -1061,6 +1062,9 @@
 
   // predicate controlling translation of StoreCM
   bool unnecessary_storestore(const Node *storecm);
+
+  // predicate controlling addressing modes
+  bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 %}
 
 source %{
@@ -3449,11 +3453,6 @@
 // Does the CPU require late expand (see block.cpp for description of late expand)?
 const bool Matcher::require_postalloc_expand = false;
 
-// Should the Matcher clone shifts on addressing modes, expecting them
-// to be subsumed into complex addressing expressions or compute them
-// into registers?  True for Intel but false for most RISCs
-const bool Matcher::clone_shift_expressions = false;
-
 // Do we need to mask the count passed to shift instructions or does
 // the cpu only look at the lower 5/6 bits anyway?
 const bool Matcher::need_masked_shift_count = false;
@@ -3572,8 +3571,119 @@
   return FP_REG_mask();
 }
 
+bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
+  for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
+    Node* u = addp->fast_out(i);
+    if (u->is_Mem()) {
+      int opsize = u->as_Mem()->memory_size();
+      assert(opsize > 0, "unexpected memory operand size");
+      if (u->as_Mem()->memory_size() != (1<<shift)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
 const bool Matcher::convi2l_type_required = false;
 
+// Should the Matcher clone shifts on addressing modes, expecting them
+// to be subsumed into complex addressing expressions or compute them
+// into registers?
+bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
+  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
+    return true;
+  }
+
+  Node *off = m->in(AddPNode::Offset);
+  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
+      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
+      // Are there other uses besides address expressions?
+      !is_visited(off)) {
+    address_visited.set(off->_idx); // Flag as address_visited
+    mstack.push(off->in(2), Visit);
+    Node *conv = off->in(1);
+    if (conv->Opcode() == Op_ConvI2L &&
+        // Are there other uses besides address expressions?
+        !is_visited(conv)) {
+      address_visited.set(conv->_idx); // Flag as address_visited
+      mstack.push(conv->in(1), Pre_Visit);
+    } else {
+      mstack.push(conv, Pre_Visit);
+    }
+    address_visited.test_set(m->_idx); // Flag as address_visited
+    mstack.push(m->in(AddPNode::Address), Pre_Visit);
+    mstack.push(m->in(AddPNode::Base), Pre_Visit);
+    return true;
+  } else if (off->Opcode() == Op_ConvI2L &&
+             // Are there other uses besides address expressions?
+             !is_visited(off)) {
+    address_visited.test_set(m->_idx); // Flag as address_visited
+    address_visited.set(off->_idx); // Flag as address_visited
+    mstack.push(off->in(1), Pre_Visit);
+    mstack.push(m->in(AddPNode::Address), Pre_Visit);
+    mstack.push(m->in(AddPNode::Base), Pre_Visit);
+    return true;
+  }
+  return false;
+}
+
+// Transform:
+// (AddP base (AddP base address (LShiftL index con)) offset)
+// into:
+// (AddP base (AddP base offset) (LShiftL index con))
+// to take full advantage of ARM's addressing modes
+void Compile::reshape_address(AddPNode* addp) {
+  Node *addr = addp->in(AddPNode::Address);
+  if (addr->is_AddP() && addr->in(AddPNode::Base) == addp->in(AddPNode::Base)) {
+    const AddPNode *addp2 = addr->as_AddP();
+    if ((addp2->in(AddPNode::Offset)->Opcode() == Op_LShiftL &&
+         addp2->in(AddPNode::Offset)->in(2)->is_Con() &&
+         size_fits_all_mem_uses(addp, addp2->in(AddPNode::Offset)->in(2)->get_int())) ||
+        addp2->in(AddPNode::Offset)->Opcode() == Op_ConvI2L) {
+
+      // Any use that can't embed the address computation?
+      for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
+        Node* u = addp->fast_out(i);
+        if (!u->is_Mem() || u->is_LoadVector() || u->is_StoreVector() || u->Opcode() == Op_StoreCM) {
+          return;
+        }
+      }
+      
+      Node* off = addp->in(AddPNode::Offset);
+      Node* addr2 = addp2->in(AddPNode::Address);
+      Node* base = addp->in(AddPNode::Base);
+      
+      Node* new_addr = NULL;
+      // Check whether the graph already has the new AddP we need
+      // before we create one (no GVN available here).
+      for (DUIterator_Fast imax, i = addr2->fast_outs(imax); i < imax; i++) {
+        Node* u = addr2->fast_out(i);
+        if (u->is_AddP() &&
+            u->in(AddPNode::Base) == base &&
+            u->in(AddPNode::Address) == addr2 &&
+            u->in(AddPNode::Offset) == off) {
+          new_addr = u;
+          break;
+        }
+      }
+      
+      if (new_addr == NULL) {
+        new_addr = new AddPNode(base, addr2, off);
+      }
+      Node* new_off = addp2->in(AddPNode::Offset);
+      addp->set_req(AddPNode::Address, new_addr);
+      if (addr->outcnt() == 0) {
+        addr->disconnect_inputs(NULL, this);
+      }
+      addp->set_req(AddPNode::Offset, new_off);
+      if (off->outcnt() == 0) {
+        off->disconnect_inputs(NULL, this);
+      }
+    }
+  }
+}
+
 // helper for encoding java_to_runtime calls on sim
 //
 // this is needed to compute the extra arguments required when
@@ -3643,12 +3753,10 @@
     // encoder that the index needs to be sign extended, so we have to
     // enumerate all the cases.
     switch (opcode) {
-    case INDINDEXSCALEDOFFSETI2L:
     case INDINDEXSCALEDI2L:
-    case INDINDEXSCALEDOFFSETI2LN:
     case INDINDEXSCALEDI2LN:
-    case INDINDEXOFFSETI2L:
-    case INDINDEXOFFSETI2LN:
+    case INDINDEXI2L:
+    case INDINDEXI2LN:
       scale = Address::sxtw(size);
       break;
     default:
@@ -3658,12 +3766,8 @@
     if (index == -1) {
       (masm.*insn)(reg, Address(base, disp));
     } else {
-      if (disp == 0) {
-        (masm.*insn)(reg, Address(base, as_Register(index), scale));
-      } else {
-        masm.lea(rscratch1, Address(base, disp));
-        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
-      }
+      assert(disp == 0, "unsupported address mode: disp = %d", disp);
+      (masm.*insn)(reg, Address(base, as_Register(index), scale));
     }
   }
 
@@ -3674,9 +3778,7 @@
     Address::extend scale;
 
     switch (opcode) {
-    case INDINDEXSCALEDOFFSETI2L:
     case INDINDEXSCALEDI2L:
-    case INDINDEXSCALEDOFFSETI2LN:
     case INDINDEXSCALEDI2LN:
       scale = Address::sxtw(size);
       break;
@@ -3687,12 +3789,8 @@
      if (index == -1) {
       (masm.*insn)(reg, Address(base, disp));
     } else {
-      if (disp == 0) {
-        (masm.*insn)(reg, Address(base, as_Register(index), scale));
-      } else {
-        masm.lea(rscratch1, Address(base, disp));
-        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
-      }
+      assert(disp == 0, "unsupported address mode: disp = %d", disp);
+      (masm.*insn)(reg, Address(base, as_Register(index), scale));
     }
   }
 
@@ -6106,65 +6204,10 @@
   %}
 %}
 
-operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
-%{
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP reg (LShiftL lreg scale)) off);
-  op_cost(INSN_COST);
-  format %{ "$reg, $lreg lsl($scale), $off" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
-operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
-%{
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP reg (LShiftL lreg scale)) off);
-  op_cost(INSN_COST);
-  format %{ "$reg, $lreg lsl($scale), $off" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
-operand indIndexOffsetI2L(iRegP reg, iRegI ireg, immLU12 off)
-%{
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP reg (ConvI2L ireg)) off);
-  op_cost(INSN_COST);
-  format %{ "$reg, $ireg, $off I2L" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($ireg);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
-%{
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
-  op_cost(INSN_COST);
-  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($ireg);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
 operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
 %{
   constraint(ALLOC_IN_RC(ptr_reg));
+  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
   match(AddP reg (LShiftL (ConvI2L ireg) scale));
   op_cost(0);
   format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
@@ -6179,6 +6222,7 @@
 operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
 %{
   constraint(ALLOC_IN_RC(ptr_reg));
+  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
   match(AddP reg (LShiftL lreg scale));
   op_cost(0);
   format %{ "$reg, $lreg lsl($scale)" %}
@@ -6190,6 +6234,20 @@
   %}
 %}
 
+operand indIndexI2L(iRegP reg, iRegI ireg)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg (ConvI2L ireg));
+  op_cost(0);
+  format %{ "$reg, $ireg, 0, I2L" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($ireg);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
 operand indIndex(iRegP reg, iRegL lreg)
 %{
   constraint(ALLOC_IN_RC(ptr_reg));
@@ -6331,69 +6389,9 @@
   %}
 %}
 
-operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
-%{
-  predicate(Universe::narrow_oop_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
-  op_cost(0);
-  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
-operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
-%{
-  predicate(Universe::narrow_oop_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
-  op_cost(INSN_COST);
-  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
-operand indIndexOffsetI2LN(iRegN reg, iRegI ireg, immLU12 off)
-%{
-  predicate(Universe::narrow_oop_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP (DecodeN reg) (ConvI2L ireg)) off);
-  op_cost(INSN_COST);
-  format %{ "$reg, $ireg, $off I2L\t# narrow" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($ireg);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
-%{
-  predicate(Universe::narrow_oop_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
-  op_cost(INSN_COST);
-  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($ireg);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
 operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
   op_cost(0);
@@ -6408,7 +6406,7 @@
 
 operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
 %{
-  predicate(Universe::narrow_oop_shift() == 0);
+  predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP (DecodeN reg) (LShiftL lreg scale));
   op_cost(0);
@@ -6421,6 +6419,21 @@
   %}
 %}
 
+operand indIndexI2LN(iRegN reg, iRegI ireg)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) (ConvI2L ireg));
+  op_cost(0);
+  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($ireg);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
 operand indIndexN(iRegN reg, iRegL lreg)
 %{
   predicate(Universe::narrow_oop_shift() == 0);
@@ -6641,9 +6654,8 @@
 // memory is used to define read/write location for load/store
 // instruction defs. we can turn a memory op into an Address
 
-opclass memory(indirect, indIndexScaledOffsetI, indIndexScaledOffsetL, indIndexOffsetI2L, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
-               indirectN, indIndexScaledOffsetIN, indIndexScaledOffsetLN, indIndexOffsetI2LN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
-
+opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL,
+               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN);
 
 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 // operations. it allows the src to be either an iRegI or a (ConvL2I
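
The AArch64 hunks above drop the scaled-plus-offset memory operands and rely instead on three pieces: the size_fits_all_mem_uses() predicate, the Matcher::clone_address_expressions() hook and Compile::reshape_address(). The predicate exists because a register-offset access such as ldr w0, [x1, w2, sxtw #2] hard-wires the scale to the access size, so a shifted index may only be folded into the address when every memory user of that AddP accesses exactly 1 << shift bytes. A stand-alone sketch of that rule, using toy types rather than HotSpot's node classes:

// Toy model of size_fits_all_mem_uses() (illustrative types, not HotSpot's):
// folding "index << shift" into the addressing mode is only legal when every
// memory user accesses exactly 1 << shift bytes.
#include <cstdio>
#include <vector>

struct MemUse { int memory_size; };  // stand-in for a Load/Store node's access size

static bool size_fits_all_mem_uses(const std::vector<MemUse>& uses, int shift) {
  for (const MemUse& u : uses) {
    if (u.memory_size != (1 << shift)) {
      return false;                  // mixed sizes: keep the shifted index in a register
    }
  }
  return true;                       // all uses match, e.g. ldr w0, [x1, w2, sxtw #2]
}

int main() {
  std::vector<MemUse> int_accesses   = { {4}, {4} };  // two 4-byte users, shift 2 fits
  std::vector<MemUse> mixed_accesses = { {4}, {8} };  // 4-byte and 8-byte users
  std::printf("%d %d\n",
              size_fits_all_mem_uses(int_accesses, 2),    // prints 1
              size_fits_all_mem_uses(mixed_accesses, 2)); // prints 0
  return 0;
}

reshape_address() then rewrites (AddP base (AddP base address (LShiftL index con)) offset) into (AddP base (AddP base offset) (LShiftL index con)), so the shifted index sits directly under the AddP feeding the memory node and the indIndexScaled* operands can match it.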
--- a/hotspot/src/cpu/ppc/vm/c1_LIRGenerator_ppc.cpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/cpu/ppc/vm/c1_LIRGenerator_ppc.cpp	Wed May 11 00:31:28 2016 +0300
@@ -1427,10 +1427,10 @@
       ShouldNotReachHere();
     }
   }
+}
 
-  void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
-    fatal("vectorizedMismatch intrinsic is not implemented on this platform");
-  }
+void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
+  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
 }
 
 void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Wed May 11 00:31:28 2016 +0300
@@ -817,6 +817,16 @@
 
 source %{
 
+// Should the Matcher clone shifts on addressing modes, expecting them
+// to be subsumed into complex addressing expressions or compute them
+// into registers?
+bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
+  return clone_base_plus_offset_address(m, mstack, address_visited);
+}
+
+void Compile::reshape_address(AddPNode* addp) {
+}
+
 // Optimize load-acquire.
 //
 // Check if acquire is unnecessary due to following operation that does
@@ -2157,11 +2167,6 @@
 // Power6 requires postalloc expand (see block.cpp for description of postalloc expand).
 const bool Matcher::require_postalloc_expand = true;
 
-// Should the Matcher clone shifts on addressing modes, expecting them to
-// be subsumed into complex addressing expressions or compute them into
-// registers? True for Intel but false for most RISCs.
-const bool Matcher::clone_shift_expressions = false;
-
 // Do we need to mask the count passed to shift instructions or does
 // the cpu only look at the lower 5/6 bits anyway?
 // PowerPC requires masked shift counts.
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Wed May 11 00:31:28 2016 +0300
@@ -1995,11 +1995,6 @@
 // Does the CPU require late expand (see block.cpp for description of late expand)?
 const bool Matcher::require_postalloc_expand = false;
 
-// Should the Matcher clone shifts on addressing modes, expecting them to
-// be subsumed into complex addressing expressions or compute them into
-// registers?  True for Intel but false for most RISCs
-const bool Matcher::clone_shift_expressions = false;
-
 // Do we need to mask the count passed to shift instructions or does
 // the cpu only look at the lower 5/6 bits anyway?
 const bool Matcher::need_masked_shift_count = false;
@@ -2133,8 +2128,19 @@
   return L7_REGP_mask();
 }
 
+
 const bool Matcher::convi2l_type_required = true;
 
+// Should the Matcher clone shifts on addressing modes, expecting them
+// to be subsumed into complex addressing expressions or compute them
+// into registers?
+bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
+  return clone_base_plus_offset_address(m, mstack, address_visited);
+}
+
+void Compile::reshape_address(AddPNode* addp) {
+}
+
 %}
 
 
--- a/hotspot/src/cpu/x86/vm/x86.ad	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/cpu/x86/vm/x86.ad	Wed May 11 00:31:28 2016 +0300
@@ -1586,6 +1586,8 @@
 
 source %{
 
+#include "opto/addnode.hpp"
+
 // Emit exception handler code.
 // Stuff framesize into a register and call a VM stub routine.
 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
@@ -1861,8 +1863,79 @@
   return false;
 }
 
+
 const bool Matcher::convi2l_type_required = true;
 
+// Check for shift by small constant as well
+static bool clone_shift(Node* shift, Matcher* matcher, Matcher::MStack& mstack, VectorSet& address_visited) {
+  if (shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
+      shift->in(2)->get_int() <= 3 &&
+      // Are there other uses besides address expressions?
+      !matcher->is_visited(shift)) {
+    address_visited.set(shift->_idx); // Flag as address_visited
+    mstack.push(shift->in(2), Matcher::Visit);
+    Node *conv = shift->in(1);
+#ifdef _LP64
+    // Allow Matcher to match the rule which bypass
+    // ConvI2L operation for an array index on LP64
+    // if the index value is positive.
+    if (conv->Opcode() == Op_ConvI2L &&
+        conv->as_Type()->type()->is_long()->_lo >= 0 &&
+        // Are there other uses besides address expressions?
+        !matcher->is_visited(conv)) {
+      address_visited.set(conv->_idx); // Flag as address_visited
+      mstack.push(conv->in(1), Matcher::Pre_Visit);
+    } else
+#endif
+      mstack.push(conv, Matcher::Pre_Visit);
+    return true;
+  }
+  return false;
+}
+
+// Should the Matcher clone shifts on addressing modes, expecting them
+// to be subsumed into complex addressing expressions or compute them
+// into registers?
+bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
+  Node *off = m->in(AddPNode::Offset);
+  if (off->is_Con()) {
+    address_visited.test_set(m->_idx); // Flag as address_visited
+    Node *adr = m->in(AddPNode::Address);
+
+    // Intel can handle 2 adds in addressing mode
+    // AtomicAdd is not an addressing expression.
+    // Cheap to find it by looking for screwy base.
+    if (adr->is_AddP() &&
+        !adr->in(AddPNode::Base)->is_top() &&
+        // Are there other uses besides address expressions?
+        !is_visited(adr)) {
+      address_visited.set(adr->_idx); // Flag as address_visited
+      Node *shift = adr->in(AddPNode::Offset);
+      if (!clone_shift(shift, this, mstack, address_visited)) {
+        mstack.push(shift, Pre_Visit);
+      }
+      mstack.push(adr->in(AddPNode::Address), Pre_Visit);
+      mstack.push(adr->in(AddPNode::Base), Pre_Visit);
+    } else {
+      mstack.push(adr, Pre_Visit);
+    }
+
+    // Clone X+offset as it also folds into most addressing expressions
+    mstack.push(off, Visit);
+    mstack.push(m->in(AddPNode::Base), Pre_Visit);
+    return true;
+  } else if (clone_shift(off, this, mstack, address_visited)) {
+    address_visited.test_set(m->_idx); // Flag as address_visited
+    mstack.push(m->in(AddPNode::Address), Pre_Visit);
+    mstack.push(m->in(AddPNode::Base), Pre_Visit);
+    return true;
+  }
+  return false;
+}
+
+void Compile::reshape_address(AddPNode* addp) {
+}
+
 // Helper methods for MachSpillCopyNode::implementation().
 static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
                           int src_hi, int dst_hi, uint ireg, outputStream* st) {
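
On x86 the new hook keeps the previous behaviour: clone_shift() only clones a left shift by a constant of at most 3, which is exactly what a SIB byte can encode. A C++ analogue of that limit (an illustration, not HotSpot code):

// x86 addressing modes scale the index register by 1, 2, 4 or 8, i.e. an
// implicit "index << s" with s in [0, 3]; larger scales need extra arithmetic.
#include <cstdint>

int64_t load_scale8(const int64_t* base, int64_t i) {
  // Folds into one instruction, e.g.:  mov rax, [rdi + rsi*8]
  return base[i];
}

int64_t load_scale16(const int64_t* base, int64_t i) {
  // A scale of 16 (shift 4) cannot be encoded, so part of the scaling has to
  // be computed into a register before the load.
  return base[i * 2];
}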
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Wed May 11 00:31:28 2016 +0300
@@ -1438,11 +1438,6 @@
 // Does the CPU require late expand (see block.cpp for description of late expand)?
 const bool Matcher::require_postalloc_expand = false;
 
-// Should the Matcher clone shifts on addressing modes, expecting them to
-// be subsumed into complex addressing expressions or compute them into
-// registers?  True for Intel but false for most RISCs
-const bool Matcher::clone_shift_expressions = true;
-
 // Do we need to mask the count passed to shift instructions or does
 // the cpu only look at the lower 5/6 bits anyway?
 const bool Matcher::need_masked_shift_count = false;
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Wed May 11 00:31:28 2016 +0300
@@ -1646,11 +1646,6 @@
 // Does the CPU require late expand (see block.cpp for description of late expand)?
 const bool Matcher::require_postalloc_expand = false;
 
-// Should the Matcher clone shifts on addressing modes, expecting them
-// to be subsumed into complex addressing expressions or compute them
-// into registers?  True for Intel but false for most RISCs
-const bool Matcher::clone_shift_expressions = true;
-
 // Do we need to mask the count passed to shift instructions or does
 // the cpu only look at the lower 5/6 bits anyway?
 const bool Matcher::need_masked_shift_count = false;
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Wed May 11 00:31:28 2016 +0300
@@ -1559,13 +1559,13 @@
 
     @HotSpotVMAddress(name = "os::javaTimeMillis") @Stable public long javaTimeMillisAddress;
     @HotSpotVMAddress(name = "os::javaTimeNanos") @Stable public long javaTimeNanosAddress;
-    @HotSpotVMAddress(name = "SharedRuntime::dsin") @Stable public long arithmeticSinAddress;
-    @HotSpotVMAddress(name = "SharedRuntime::dcos") @Stable public long arithmeticCosAddress;
-    @HotSpotVMAddress(name = "SharedRuntime::dtan") @Stable public long arithmeticTanAddress;
-    @HotSpotVMAddress(name = "SharedRuntime::dexp") @Stable public long arithmeticExpAddress;
-    @HotSpotVMAddress(name = "SharedRuntime::dlog") @Stable public long arithmeticLogAddress;
-    @HotSpotVMAddress(name = "SharedRuntime::dlog10") @Stable public long arithmeticLog10Address;
-    @HotSpotVMAddress(name = "SharedRuntime::dpow") @Stable public long arithmeticPowAddress;
+    @HotSpotVMField(name = "CompilerToVM::Data::dsin", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long arithmeticSinAddress;
+    @HotSpotVMField(name = "CompilerToVM::Data::dcos", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long arithmeticCosAddress;
+    @HotSpotVMField(name = "CompilerToVM::Data::dtan", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long arithmeticTanAddress;
+    @HotSpotVMField(name = "CompilerToVM::Data::dexp", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long arithmeticExpAddress;
+    @HotSpotVMField(name = "CompilerToVM::Data::dlog", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long arithmeticLogAddress;
+    @HotSpotVMField(name = "CompilerToVM::Data::dlog10", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long arithmeticLog10Address;
+    @HotSpotVMField(name = "CompilerToVM::Data::dpow", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long arithmeticPowAddress;
 
     @HotSpotVMFlag(name = "JVMCICounterSize") @Stable public int jvmciCountersSize;
 
--- a/hotspot/src/share/vm/c1/c1_globals.hpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/c1/c1_globals.hpp	Wed May 11 00:31:28 2016 +0300
@@ -176,7 +176,7 @@
   product(bool, InlineSynchronizedMethods, true,                            \
           "Inline synchronized methods")                                    \
                                                                             \
-  develop(bool, InlineNIOCheckIndex, true,                                  \
+  diagnostic(bool, InlineNIOCheckIndex, true,                               \
           "Intrinsify java.nio.Buffer.checkIndex")                          \
                                                                             \
   develop(bool, CanonicalizeNodes, true,                                    \
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed May 11 00:31:28 2016 +0300
@@ -1255,12 +1255,15 @@
   do_name(weakCompareAndSwapObject_name,         "weakCompareAndSwapObject")                                            \
   do_name(weakCompareAndSwapObjectAcquire_name,  "weakCompareAndSwapObjectAcquire")                                     \
   do_name(weakCompareAndSwapObjectRelease_name,  "weakCompareAndSwapObjectRelease")                                     \
+  do_name(weakCompareAndSwapObjectVolatile_name, "weakCompareAndSwapObjectVolatile")                                    \
   do_name(weakCompareAndSwapLong_name,           "weakCompareAndSwapLong")                                              \
   do_name(weakCompareAndSwapLongAcquire_name,    "weakCompareAndSwapLongAcquire")                                       \
   do_name(weakCompareAndSwapLongRelease_name,    "weakCompareAndSwapLongRelease")                                       \
+  do_name(weakCompareAndSwapLongVolatile_name,   "weakCompareAndSwapLongVolatile")                                      \
   do_name(weakCompareAndSwapInt_name,            "weakCompareAndSwapInt")                                               \
   do_name(weakCompareAndSwapIntAcquire_name,     "weakCompareAndSwapIntAcquire")                                        \
   do_name(weakCompareAndSwapIntRelease_name,     "weakCompareAndSwapIntRelease")                                        \
+  do_name(weakCompareAndSwapIntVolatile_name,    "weakCompareAndSwapIntVolatile")                                       \
                                                                                                                         \
   do_intrinsic(_compareAndSwapObject,             jdk_internal_misc_Unsafe,  compareAndSwapObject_name,             compareAndSwapObject_signature,     F_RN) \
   do_intrinsic(_compareAndExchangeObjectVolatile, jdk_internal_misc_Unsafe,  compareAndExchangeObjectVolatile_name, compareAndExchangeObject_signature, F_RN) \
@@ -1278,12 +1281,15 @@
   do_intrinsic(_weakCompareAndSwapObject,         jdk_internal_misc_Unsafe,  weakCompareAndSwapObject_name,         compareAndSwapObject_signature,     F_R) \
   do_intrinsic(_weakCompareAndSwapObjectAcquire,  jdk_internal_misc_Unsafe,  weakCompareAndSwapObjectAcquire_name,  compareAndSwapObject_signature,     F_R) \
   do_intrinsic(_weakCompareAndSwapObjectRelease,  jdk_internal_misc_Unsafe,  weakCompareAndSwapObjectRelease_name,  compareAndSwapObject_signature,     F_R) \
+  do_intrinsic(_weakCompareAndSwapObjectVolatile, jdk_internal_misc_Unsafe,  weakCompareAndSwapObjectVolatile_name, compareAndSwapObject_signature,     F_R) \
   do_intrinsic(_weakCompareAndSwapLong,           jdk_internal_misc_Unsafe,  weakCompareAndSwapLong_name,           compareAndSwapLong_signature,       F_R) \
   do_intrinsic(_weakCompareAndSwapLongAcquire,    jdk_internal_misc_Unsafe,  weakCompareAndSwapLongAcquire_name,    compareAndSwapLong_signature,       F_R) \
   do_intrinsic(_weakCompareAndSwapLongRelease,    jdk_internal_misc_Unsafe,  weakCompareAndSwapLongRelease_name,    compareAndSwapLong_signature,       F_R) \
+  do_intrinsic(_weakCompareAndSwapLongVolatile,   jdk_internal_misc_Unsafe,  weakCompareAndSwapLongVolatile_name,   compareAndSwapLong_signature,       F_R) \
   do_intrinsic(_weakCompareAndSwapInt,            jdk_internal_misc_Unsafe,  weakCompareAndSwapInt_name,            compareAndSwapInt_signature,        F_R) \
   do_intrinsic(_weakCompareAndSwapIntAcquire,     jdk_internal_misc_Unsafe,  weakCompareAndSwapIntAcquire_name,     compareAndSwapInt_signature,        F_R) \
   do_intrinsic(_weakCompareAndSwapIntRelease,     jdk_internal_misc_Unsafe,  weakCompareAndSwapIntRelease_name,     compareAndSwapInt_signature,        F_R) \
+  do_intrinsic(_weakCompareAndSwapIntVolatile,    jdk_internal_misc_Unsafe,  weakCompareAndSwapIntVolatile_name,    compareAndSwapInt_signature,        F_R) \
                                                                                                                         \
   do_intrinsic(_getAndAddInt,             jdk_internal_misc_Unsafe,     getAndAddInt_name, getAndAddInt_signature, F_R)       \
    do_name(     getAndAddInt_name,                                      "getAndAddInt")                                       \
--- a/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.cpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.cpp	Wed May 11 00:31:28 2016 +0300
@@ -154,6 +154,14 @@
 
 int CompilerToVM::Data::vm_page_size;
 
+address CompilerToVM::Data::dsin;
+address CompilerToVM::Data::dcos;
+address CompilerToVM::Data::dtan;
+address CompilerToVM::Data::dexp;
+address CompilerToVM::Data::dlog;
+address CompilerToVM::Data::dlog10;
+address CompilerToVM::Data::dpow;
+
 void CompilerToVM::Data::initialize() {
   Klass_vtable_start_offset = in_bytes(Klass::vtable_start_offset());
   Klass_vtable_length_offset = in_bytes(Klass::vtable_length_offset());
@@ -205,6 +213,23 @@
   }
 
   vm_page_size = os::vm_page_size();
+
+#define SET_TRIGFUNC(name)                                      \
+  if (StubRoutines::name() != NULL) {                           \
+    name = StubRoutines::name();                                \
+  } else {                                                      \
+    name = CAST_FROM_FN_PTR(address, SharedRuntime::name);      \
+  }
+
+  SET_TRIGFUNC(dsin);
+  SET_TRIGFUNC(dcos);
+  SET_TRIGFUNC(dtan);
+  SET_TRIGFUNC(dexp);
+  SET_TRIGFUNC(dlog10);
+  SET_TRIGFUNC(dlog);
+  SET_TRIGFUNC(dpow);
+
+#undef SET_TRIGFUNC
 }
 
 /**
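
The SET_TRIGFUNC block above publishes, for each math entry point, either the generated stub (when the platform produced one) or the portable SharedRuntime implementation, so JVMCI now reads one address per function out of CompilerToVM::Data instead of binding to SharedRuntime symbols. A minimal sketch of that selection pattern, with stand-in names rather than the real VM types:

#include <cmath>
#include <cstdio>

typedef double (*math_fn)(double);

static math_fn generated_dsin_stub = nullptr;                         // set if a stub was generated
static double shared_runtime_dsin(double x) { return std::sin(x); }   // portable fallback

// Prefer the optimized stub when it exists, otherwise fall back -- the same
// either/or that CompilerToVM::Data::initialize() records for JVMCI.
static math_fn resolve_dsin() {
  return generated_dsin_stub != nullptr ? generated_dsin_stub
                                        : &shared_runtime_dsin;
}

int main() {
  std::printf("dsin(1.0) = %f\n", resolve_dsin()(1.0));
  return 0;
}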
--- a/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.hpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.hpp	Wed May 11 00:31:28 2016 +0300
@@ -65,6 +65,14 @@
 
     static int vm_page_size;
 
+    static address dsin;
+    static address dcos;
+    static address dtan;
+    static address dexp;
+    static address dlog;
+    static address dlog10;
+    static address dpow;
+
    public:
     static void initialize();
   };
--- a/hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp	Wed May 11 00:31:28 2016 +0300
@@ -76,6 +76,14 @@
                                                                                                                                      \
   static_field(CompilerToVM::Data,             vm_page_size,                           int)                                          \
                                                                                                                                      \
+  static_field(CompilerToVM::Data,             dsin,                                   address)                                      \
+  static_field(CompilerToVM::Data,             dcos,                                   address)                                      \
+  static_field(CompilerToVM::Data,             dtan,                                   address)                                      \
+  static_field(CompilerToVM::Data,             dexp,                                   address)                                      \
+  static_field(CompilerToVM::Data,             dlog,                                   address)                                      \
+  static_field(CompilerToVM::Data,             dlog10,                                 address)                                      \
+  static_field(CompilerToVM::Data,             dpow,                                   address)                                      \
+                                                                                                                                     \
   static_field(Abstract_VM_Version,            _features,                              uint64_t)                                     \
                                                                                                                                      \
   nonstatic_field(Array<int>,                  _length,                                int)                                          \
@@ -524,13 +532,6 @@
   declare_function(SharedRuntime::exception_handler_for_return_address)   \
   declare_function(SharedRuntime::OSR_migration_end)                      \
   declare_function(SharedRuntime::enable_stack_reserved_zone)             \
-  declare_function(SharedRuntime::dsin)                                   \
-  declare_function(SharedRuntime::dcos)                                   \
-  declare_function(SharedRuntime::dtan)                                   \
-  declare_function(SharedRuntime::dexp)                                   \
-  declare_function(SharedRuntime::dlog)                                   \
-  declare_function(SharedRuntime::dlog10)                                 \
-  declare_function(SharedRuntime::dpow)                                   \
                                                                           \
   declare_function(os::dll_load)                                          \
   declare_function(os::dll_lookup)                                        \
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Wed May 11 00:31:28 2016 +0300
@@ -589,26 +589,26 @@
   product(bool, BlockLayoutRotateLoops, true,                               \
           "Allow back branches to be fall throughs in the block layout")    \
                                                                             \
-  develop(bool, InlineReflectionGetCallerClass, true,                       \
+  diagnostic(bool, InlineReflectionGetCallerClass, true,                    \
           "inline sun.reflect.Reflection.getCallerClass(), known to be "    \
           "part of base library DLL")                                       \
                                                                             \
-  develop(bool, InlineObjectCopy, true,                                     \
+  diagnostic(bool, InlineObjectCopy, true,                                  \
           "inline Object.clone and Arrays.copyOf[Range] intrinsics")        \
                                                                             \
-  develop(bool, SpecialStringCompareTo, true,                               \
+  diagnostic(bool, SpecialStringCompareTo, true,                            \
           "special version of string compareTo")                            \
                                                                             \
-  develop(bool, SpecialStringIndexOf, true,                                 \
+  diagnostic(bool, SpecialStringIndexOf, true,                              \
           "special version of string indexOf")                              \
                                                                             \
-  develop(bool, SpecialStringEquals, true,                                  \
+  diagnostic(bool, SpecialStringEquals, true,                               \
           "special version of string equals")                               \
                                                                             \
-  develop(bool, SpecialArraysEquals, true,                                  \
+  diagnostic(bool, SpecialArraysEquals, true,                               \
           "special version of Arrays.equals(char[],char[])")                \
                                                                             \
-  product(bool, SpecialEncodeISOArray, true,                                \
+  diagnostic(bool, SpecialEncodeISOArray, true,                             \
           "special version of ISO_8859_1$Encoder.encodeISOArray")           \
                                                                             \
   develop(bool, BailoutToInterpreterForThrows, false,                       \
@@ -710,22 +710,22 @@
   diagnostic(bool, OptimizeExpensiveOps, true,                              \
           "Find best control for expensive operations")                     \
                                                                             \
-  product(bool, UseMathExactIntrinsics, true,                               \
+  diagnostic(bool, UseMathExactIntrinsics, true,                            \
           "Enables intrinsification of various java.lang.Math functions")   \
                                                                             \
-  product(bool, UseMultiplyToLenIntrinsic, false,                           \
+  diagnostic(bool, UseMultiplyToLenIntrinsic, false,                        \
           "Enables intrinsification of BigInteger.multiplyToLen()")         \
                                                                             \
-  product(bool, UseSquareToLenIntrinsic, false,                             \
+  diagnostic(bool, UseSquareToLenIntrinsic, false,                          \
           "Enables intrinsification of BigInteger.squareToLen()")           \
                                                                             \
-  product(bool, UseMulAddIntrinsic, false,                                  \
+  diagnostic(bool, UseMulAddIntrinsic, false,                               \
           "Enables intrinsification of BigInteger.mulAdd()")                \
                                                                             \
-  product(bool, UseMontgomeryMultiplyIntrinsic, false,                      \
+  diagnostic(bool, UseMontgomeryMultiplyIntrinsic, false,                   \
           "Enables intrinsification of BigInteger.montgomeryMultiply()")    \
                                                                             \
-  product(bool, UseMontgomerySquareIntrinsic, false,                        \
+  diagnostic(bool, UseMontgomerySquareIntrinsic, false,                     \
           "Enables intrinsification of BigInteger.montgomerySquare()")      \
                                                                             \
   product(bool, UseTypeSpeculation, true,                                   \
--- a/hotspot/src/share/vm/opto/compile.cpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Wed May 11 00:31:28 2016 +0300
@@ -2905,6 +2905,8 @@
       }
     }
 #endif
+    // platform dependent reshaping of the address expression
+    reshape_address(n->as_AddP());
     break;
   }
 
--- a/hotspot/src/share/vm/opto/compile.hpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/opto/compile.hpp	Wed May 11 00:31:28 2016 +0300
@@ -44,6 +44,7 @@
 #include "trace/tracing.hpp"
 #include "utilities/ticks.hpp"
 
+class AddPNode;
 class Block;
 class Bundle;
 class C2Compiler;
@@ -579,6 +580,8 @@
   int                   _scratch_const_size;    // For temporary code buffers.
   bool                  _in_scratch_emit_size;  // true when in scratch_emit_size.
 
+  void reshape_address(AddPNode* n);
+
  public:
   // Accessors
 
--- a/hotspot/src/share/vm/opto/library_call.cpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Wed May 11 00:31:28 2016 +0300
@@ -651,12 +651,15 @@
   case vmIntrinsics::_weakCompareAndSwapObject:         return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Relaxed);
   case vmIntrinsics::_weakCompareAndSwapObjectAcquire:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Acquire);
   case vmIntrinsics::_weakCompareAndSwapObjectRelease:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Release);
+  case vmIntrinsics::_weakCompareAndSwapObjectVolatile: return inline_unsafe_load_store(T_OBJECT, LS_cmp_swap_weak, Volatile);
   case vmIntrinsics::_weakCompareAndSwapInt:            return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Relaxed);
   case vmIntrinsics::_weakCompareAndSwapIntAcquire:     return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Acquire);
   case vmIntrinsics::_weakCompareAndSwapIntRelease:     return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Release);
+  case vmIntrinsics::_weakCompareAndSwapIntVolatile:    return inline_unsafe_load_store(T_INT,    LS_cmp_swap_weak, Volatile);
   case vmIntrinsics::_weakCompareAndSwapLong:           return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Relaxed);
   case vmIntrinsics::_weakCompareAndSwapLongAcquire:    return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Acquire);
   case vmIntrinsics::_weakCompareAndSwapLongRelease:    return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Release);
+  case vmIntrinsics::_weakCompareAndSwapLongVolatile:   return inline_unsafe_load_store(T_LONG,   LS_cmp_swap_weak, Volatile);
 
   case vmIntrinsics::_compareAndExchangeObjectVolatile: return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Volatile);
   case vmIntrinsics::_compareAndExchangeObjectAcquire:  return inline_unsafe_load_store(T_OBJECT, LS_cmp_exchange,  Acquire);
@@ -2431,9 +2434,10 @@
   bool requires_atomic_access = false;
   switch (kind) {
       case Relaxed:
-      case Opaque:
           requires_atomic_access = AlwaysAtomicAccesses;
           break;
+      case Opaque:
+          // Opaque accesses are atomic.
       case Acquire:
       case Release:
       case Volatile:
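
The new weakCompareAndSwap{Object,Int,Long}Volatile intrinsics reuse LS_cmp_swap_weak with Volatile ordering: a compare-and-swap that orders like a volatile access but is still allowed to fail spuriously. A rough C++ analogue of those semantics (std::atomic here is only a stand-in for the Unsafe API):

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> v(0);
  int expected = 0;
  // compare_exchange_weak + seq_cst is the closest standard analogue of a
  // "weak, volatile" CAS: fully ordered, but it may fail spuriously even when
  // v == expected, so callers are expected to retry in a loop.
  while (!v.compare_exchange_weak(expected, 1, std::memory_order_seq_cst)) {
    expected = 0;  // compare_exchange_weak overwrote 'expected' on failure
  }
  std::printf("v = %d\n", v.load());
  return 0;
}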
--- a/hotspot/src/share/vm/opto/matcher.cpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Wed May 11 00:31:28 2016 +0300
@@ -963,44 +963,6 @@
 }
 #endif
 
-
-//------------------------------MStack-----------------------------------------
-// State and MStack class used in xform() and find_shared() iterative methods.
-enum Node_State { Pre_Visit,  // node has to be pre-visited
-                      Visit,  // visit node
-                 Post_Visit,  // post-visit node
-             Alt_Post_Visit   // alternative post-visit path
-                };
-
-class MStack: public Node_Stack {
-  public:
-    MStack(int size) : Node_Stack(size) { }
-
-    void push(Node *n, Node_State ns) {
-      Node_Stack::push(n, (uint)ns);
-    }
-    void push(Node *n, Node_State ns, Node *parent, int indx) {
-      ++_inode_top;
-      if ((_inode_top + 1) >= _inode_max) grow();
-      _inode_top->node = parent;
-      _inode_top->indx = (uint)indx;
-      ++_inode_top;
-      _inode_top->node = n;
-      _inode_top->indx = (uint)ns;
-    }
-    Node *parent() {
-      pop();
-      return node();
-    }
-    Node_State state() const {
-      return (Node_State)index();
-    }
-    void set_state(Node_State ns) {
-      set_index((uint)ns);
-    }
-};
-
-
 //------------------------------xform------------------------------------------
 // Given a Node in old-space, Match him (Label/Reduce) to produce a machine
 // Node in new-space.  Given a new-space Node, recursively walk his children.
@@ -2046,37 +2008,22 @@
 }
 #endif // X86
 
-// A method-klass-holder may be passed in the inline_cache_reg
-// and then expanded into the inline_cache_reg and a method_oop register
-//   defined in ad_<arch>.cpp
-
-// Check for shift by small constant as well
-static bool clone_shift(Node* shift, Matcher* matcher, MStack& mstack, VectorSet& address_visited) {
-  if (shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
-      shift->in(2)->get_int() <= 3 &&
-      // Are there other uses besides address expressions?
-      !matcher->is_visited(shift)) {
-    address_visited.set(shift->_idx); // Flag as address_visited
-    mstack.push(shift->in(2), Visit);
-    Node *conv = shift->in(1);
-#ifdef _LP64
-    // Allow Matcher to match the rule which bypass
-    // ConvI2L operation for an array index on LP64
-    // if the index value is positive.
-    if (conv->Opcode() == Op_ConvI2L &&
-        conv->as_Type()->type()->is_long()->_lo >= 0 &&
-        // Are there other uses besides address expressions?
-        !matcher->is_visited(conv)) {
-      address_visited.set(conv->_idx); // Flag as address_visited
-      mstack.push(conv->in(1), Pre_Visit);
-    } else
-#endif
-      mstack.push(conv, Pre_Visit);
+bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
+  Node *off = m->in(AddPNode::Offset);
+  if (off->is_Con()) {
+    address_visited.test_set(m->_idx); // Flag as address_visited
+    mstack.push(m->in(AddPNode::Address), Pre_Visit);
+    // Clone X+offset as it also folds into most addressing expressions
+    mstack.push(off, Visit);
+    mstack.push(m->in(AddPNode::Base), Pre_Visit);
     return true;
   }
   return false;
 }
 
+// A method-klass-holder may be passed in the inline_cache_reg
+// and then expanded into the inline_cache_reg and a method_oop register
+//   defined in ad_<arch>.cpp
 
 //------------------------------find_shared------------------------------------
 // Set bits if Node is shared or otherwise a root
@@ -2251,40 +2198,9 @@
           // But they should be marked as shared if there are other uses
           // besides address expressions.
 
-          Node *off = m->in(AddPNode::Offset);
-          if (off->is_Con()) {
-            address_visited.test_set(m->_idx); // Flag as address_visited
-            Node *adr = m->in(AddPNode::Address);
-
-            // Intel, ARM and friends can handle 2 adds in addressing mode
-            if( clone_shift_expressions && adr->is_AddP() &&
-                // AtomicAdd is not an addressing expression.
-                // Cheap to find it by looking for screwy base.
-                !adr->in(AddPNode::Base)->is_top() &&
-                // Are there other uses besides address expressions?
-                !is_visited(adr) ) {
-              address_visited.set(adr->_idx); // Flag as address_visited
-              Node *shift = adr->in(AddPNode::Offset);
-              if (!clone_shift(shift, this, mstack, address_visited)) {
-                mstack.push(shift, Pre_Visit);
-              }
-              mstack.push(adr->in(AddPNode::Address), Pre_Visit);
-              mstack.push(adr->in(AddPNode::Base), Pre_Visit);
-            } else {  // Sparc, Alpha, PPC and friends
-              mstack.push(adr, Pre_Visit);
-            }
-
-            // Clone X+offset as it also folds into most addressing expressions
-            mstack.push(off, Visit);
-            mstack.push(m->in(AddPNode::Base), Pre_Visit);
-            continue; // for(int i = ...)
-          } else if (clone_shift_expressions &&
-                     clone_shift(off, this, mstack, address_visited)) {
-              address_visited.test_set(m->_idx); // Flag as address_visited
-              mstack.push(m->in(AddPNode::Address), Pre_Visit);
-              mstack.push(m->in(AddPNode::Base), Pre_Visit);
-              continue;
-          } // if( off->is_Con() )
+          if (clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
+            continue;
+          }
         }   // if( mem_op &&
         mstack.push(m, Pre_Visit);
       }     // for(int i = ...)
--- a/hotspot/src/share/vm/opto/matcher.hpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/opto/matcher.hpp	Wed May 11 00:31:28 2016 +0300
@@ -40,6 +40,45 @@
 //---------------------------Matcher-------------------------------------------
 class Matcher : public PhaseTransform {
   friend class VMStructs;
+
+public:
+
+  // State and MStack class used in xform() and find_shared() iterative methods.
+  enum Node_State { Pre_Visit,  // node has to be pre-visited
+                    Visit,  // visit node
+                    Post_Visit,  // post-visit node
+                    Alt_Post_Visit   // alternative post-visit path
+  };
+
+  class MStack: public Node_Stack {
+  public:
+    MStack(int size) : Node_Stack(size) { }
+
+    void push(Node *n, Node_State ns) {
+      Node_Stack::push(n, (uint)ns);
+    }
+    void push(Node *n, Node_State ns, Node *parent, int indx) {
+      ++_inode_top;
+      if ((_inode_top + 1) >= _inode_max) grow();
+      _inode_top->node = parent;
+      _inode_top->indx = (uint)indx;
+      ++_inode_top;
+      _inode_top->node = n;
+      _inode_top->indx = (uint)ns;
+    }
+    Node *parent() {
+      pop();
+      return node();
+    }
+    Node_State state() const {
+      return (Node_State)index();
+    }
+    void set_state(Node_State ns) {
+      set_index((uint)ns);
+    }
+  };
+
+private:
   // Private arena of State objects
   ResourceArea _states_arena;
 
@@ -411,7 +450,9 @@
   // Should the Matcher clone shifts on addressing modes, expecting them to
   // be subsumed into complex addressing expressions or compute them into
   // registers?  True for Intel but false for most RISCs
-  static const bool clone_shift_expressions;
+  bool clone_address_expressions(AddPNode* m, MStack& mstack, VectorSet& address_visited);
+  // Clone base + offset address expression
+  bool clone_base_plus_offset_address(AddPNode* m, MStack& mstack, VectorSet& address_visited);
 
   static bool narrow_oop_use_complex_address();
   static bool narrow_klass_use_complex_address();
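
Taken together, the matcher changes replace the per-CPU constant clone_shift_expressions with a platform hook: MStack and Node_State move into matcher.hpp so the <arch>.ad files can use them, find_shared() calls Matcher::clone_address_expressions(), and the common base-plus-constant-offset case is shared through clone_base_plus_offset_address(). A compact sketch of the resulting shape, with stand-in types rather than the real Matcher:

#include <cstdio>

struct AddPNode {};                      // stand-in for the ideal-graph node

struct Matcher {
  // Shared helper (matcher.cpp): handle AddP with a constant offset.
  bool clone_base_plus_offset_address(AddPNode* m) {
    std::puts("clone base + constant offset into each use");
    return true;
  }
  // Per-platform hook (defined in <arch>.ad): ppc/sparc just defer to the
  // helper, while x86 and aarch64 additionally clone shifts and ConvI2L.
  bool clone_address_expressions(AddPNode* m) {
    return clone_base_plus_offset_address(m);
  }
};

int main() {
  Matcher matcher;
  AddPNode addp;
  // find_shared() now simply asks the platform whether it handled the address.
  return matcher.clone_address_expressions(&addp) ? 0 : 1;
}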
--- a/hotspot/src/share/vm/runtime/globals.hpp	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed May 11 00:31:28 2016 +0300
@@ -732,7 +732,7 @@
           "Control whether SHA instructions can be used "                   \
           "on SPARC, on ARM and on x86")                                    \
                                                                             \
-  product(bool, UseGHASHIntrinsics, false,                                  \
+  diagnostic(bool, UseGHASHIntrinsics, false,                               \
           "Use intrinsics for GHASH versions of crypto")                    \
                                                                             \
   product(size_t, LargePageSizeInBytes, 0,                                  \
@@ -802,27 +802,27 @@
   product(bool, UseInlineCaches, true,                                      \
           "Use Inline Caches for virtual calls ")                           \
                                                                             \
-  develop(bool, InlineArrayCopy, true,                                      \
+  diagnostic(bool, InlineArrayCopy, true,                                   \
           "Inline arraycopy native that is known to be part of "            \
           "base library DLL")                                               \
                                                                             \
-  develop(bool, InlineObjectHash, true,                                     \
+  diagnostic(bool, InlineObjectHash, true,                                  \
           "Inline Object::hashCode() native that is known to be part "      \
           "of base library DLL")                                            \
                                                                             \
-  develop(bool, InlineNatives, true,                                        \
+  diagnostic(bool, InlineNatives, true,                                     \
           "Inline natives that are known to be part of base library DLL")   \
                                                                             \
-  develop(bool, InlineMathNatives, true,                                    \
+  diagnostic(bool, InlineMathNatives, true,                                 \
           "Inline SinD, CosD, etc.")                                        \
                                                                             \
-  develop(bool, InlineClassNatives, true,                                   \
+  diagnostic(bool, InlineClassNatives, true,                                \
           "Inline Class.isInstance, etc")                                   \
                                                                             \
-  develop(bool, InlineThreadNatives, true,                                  \
+  diagnostic(bool, InlineThreadNatives, true,                               \
           "Inline Thread.currentThread, etc")                               \
                                                                             \
-  develop(bool, InlineUnsafeOps, true,                                      \
+  diagnostic(bool, InlineUnsafeOps, true,                                   \
           "Inline memory ops (native methods) from Unsafe")                 \
                                                                             \
   product(bool, CriticalJNINatives, true,                                   \
@@ -831,34 +831,34 @@
   notproduct(bool, StressCriticalJNINatives, false,                         \
           "Exercise register saving code in critical natives")              \
                                                                             \
-  product(bool, UseAESIntrinsics, false,                                    \
+  diagnostic(bool, UseAESIntrinsics, false,                                 \
           "Use intrinsics for AES versions of crypto")                      \
                                                                             \
-  product(bool, UseAESCTRIntrinsics, false,                                 \
+  diagnostic(bool, UseAESCTRIntrinsics, false,                              \
           "Use intrinsics for the paralleled version of AES/CTR crypto")    \
                                                                             \
-  product(bool, UseSHA1Intrinsics, false,                                   \
+  diagnostic(bool, UseSHA1Intrinsics, false,                                \
           "Use intrinsics for SHA-1 crypto hash function. "                 \
           "Requires that UseSHA is enabled.")                               \
                                                                             \
-  product(bool, UseSHA256Intrinsics, false,                                 \
+  diagnostic(bool, UseSHA256Intrinsics, false,                              \
           "Use intrinsics for SHA-224 and SHA-256 crypto hash functions. "  \
           "Requires that UseSHA is enabled.")                               \
                                                                             \
-  product(bool, UseSHA512Intrinsics, false,                                 \
+  diagnostic(bool, UseSHA512Intrinsics, false,                              \
           "Use intrinsics for SHA-384 and SHA-512 crypto hash functions. "  \
           "Requires that UseSHA is enabled.")                               \
                                                                             \
-  product(bool, UseCRC32Intrinsics, false,                                  \
+  diagnostic(bool, UseCRC32Intrinsics, false,                               \
           "use intrinsics for java.util.zip.CRC32")                         \
                                                                             \
-  product(bool, UseCRC32CIntrinsics, false,                                 \
+  diagnostic(bool, UseCRC32CIntrinsics, false,                              \
           "use intrinsics for java.util.zip.CRC32C")                        \
                                                                             \
-  product(bool, UseAdler32Intrinsics, false,                                \
+  diagnostic(bool, UseAdler32Intrinsics, false,                             \
           "use intrinsics for java.util.zip.Adler32")                       \
                                                                             \
-  product(bool, UseVectorizedMismatchIntrinsic, false,                      \
+  diagnostic(bool, UseVectorizedMismatchIntrinsic, false,                   \
           "Enables intrinsification of ArraysSupport.vectorizedMismatch()") \
                                                                             \
   diagnostic(ccstrlist, DisableIntrinsic, "",                               \
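Illustration of the effect (the command line below is an example, not taken from this changeset): once these intrinsic switches are diagnostic rather than product, they must be preceded by -XX:+UnlockDiagnosticVMOptions to take effect, e.g.

    java -XX:+UnlockDiagnosticVMOptions -XX:-UseAESIntrinsics ...

which is why the test updates below prepend that flag wherever one of these options is set explicitly.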
--- a/hotspot/test/compiler/cpuflags/AESIntrinsicsBase.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/cpuflags/AESIntrinsicsBase.java	Wed May 11 00:31:28 2016 +0300
@@ -40,10 +40,11 @@
     public static final String USE_AES_INTRINSICS = "UseAESIntrinsics";
     public static final String USE_SSE = "UseSSE";
     public static final String USE_VIS = "UseVIS";
+    public static final String[] USE_DIAGNOSTIC_CMD
+            = {"-XX:+UnlockDiagnosticVMOptions", "-XX:+PrintIntrinsics"};
     public static final String[] TEST_AES_CMD
             = {"-XX:+IgnoreUnrecognizedVMOptions", "-XX:+PrintFlagsFinal",
-            "-Xbatch","-XX:+UnlockDiagnosticVMOptions",
-            "-XX:+PrintIntrinsics", "-DcheckOutput=true", "-Dmode=CBC",
+            "-Xbatch", "-DcheckOutput=true", "-Dmode=CBC",
             "TestAESMain"};
 
     protected AESIntrinsicsBase(BooleanSupplier predicate) {
@@ -52,14 +53,18 @@
 
     /**
      * Prepares command for TestAESMain execution.
+     * Intrinsics flags are diagnostic and must be
+     * preceded by UnlockDiagnosticVMOptions.
      * @param args flags that must be added to command
      * @return command for TestAESMain execution
      */
     public static String[] prepareArguments(String... args) {
-        String[] command = Arrays.copyOf(args, TEST_AES_CMD.length
-                + args.length);
-        System.arraycopy(TEST_AES_CMD, 0, command, args.length,
-                TEST_AES_CMD.length);
+        String[] command = Arrays.copyOf(USE_DIAGNOSTIC_CMD, args.length
+                + USE_DIAGNOSTIC_CMD.length + TEST_AES_CMD.length);
+        System.arraycopy(args, 0, command, USE_DIAGNOSTIC_CMD.length,
+                args.length);
+        System.arraycopy(TEST_AES_CMD, 0, command, args.length
+                + USE_DIAGNOSTIC_CMD.length, TEST_AES_CMD.length);
         return command;
     }
 }
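The reworked prepareArguments above concatenates three pieces in a fixed order: the diagnostic-unlocking flags, the caller-supplied flags, and the fixed TEST_AES_CMD tail. A minimal, self-contained sketch of the same copyOf/arraycopy concatenation pattern (the class name ConcatSketch and the abbreviated arrays are invented for illustration only):

    import java.util.Arrays;

    public class ConcatSketch {
        public static void main(String[] args) {
            // Same layout as prepareArguments: prefix (unlock + PrintIntrinsics),
            // then caller flags, then the fixed test command tail.
            String[] prefix = {"-XX:+UnlockDiagnosticVMOptions", "-XX:+PrintIntrinsics"};
            String[] caller = {"-XX:+UseAESIntrinsics"};   // example caller flag
            String[] tail   = {"-Xbatch", "TestAESMain"};  // abbreviated stand-in for TEST_AES_CMD

            String[] command = Arrays.copyOf(prefix, prefix.length + caller.length + tail.length);
            System.arraycopy(caller, 0, command, prefix.length, caller.length);
            System.arraycopy(tail, 0, command, prefix.length + caller.length, tail.length);

            // Prints: [-XX:+UnlockDiagnosticVMOptions, -XX:+PrintIntrinsics,
            //          -XX:+UseAESIntrinsics, -Xbatch, TestAESMain]
            System.out.println(Arrays.toString(command));
        }
    }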
--- a/hotspot/test/compiler/intrinsics/muladd/TestMulAdd.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/intrinsics/muladd/TestMulAdd.java	Wed May 11 00:31:28 2016 +0300
@@ -28,7 +28,7 @@
  * @summary Add C2 x86 intrinsic for BigInteger::mulAdd() method
  *
  * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch
- *      -XX:+IgnoreUnrecognizedVMOptions -XX:-UseSquareToLenIntrinsic -XX:-UseMultiplyToLenIntrinsic
+ *      -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:-UseSquareToLenIntrinsic -XX:-UseMultiplyToLenIntrinsic
  *      -XX:CompileCommand=dontinline,TestMulAdd::main
  *      -XX:CompileCommand=option,TestMulAdd::base_multiply,ccstr,DisableIntrinsic,_mulAdd
  *      -XX:CompileCommand=option,java.math.BigInteger::multiply,ccstr,DisableIntrinsic,_mulAdd
--- a/hotspot/test/compiler/intrinsics/sha/cli/SHAOptionsBase.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/intrinsics/sha/cli/SHAOptionsBase.java	Wed May 11 00:31:28 2016 +0300
@@ -42,6 +42,11 @@
     protected static final String USE_SHA512_INTRINSICS_OPTION
             = "UseSHA512Intrinsics";
 
+    // Intrinsics flags are diagnostic and must be
+    // preceded by UnlockDiagnosticVMOptions.
+    protected static final String UNLOCK_DIAGNOSTIC_VM_OPTIONS
+            = "-XX:+UnlockDiagnosticVMOptions";
+
     // Note that strings below will be passed to
     // CommandLineOptionTest.verifySameJVMStartup and thus are regular
     // expressions, not just plain strings.
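The test cases updated in the hunks below pass this constant ahead of the intrinsic flag under test. A call fragment showing the intended ordering (option name and message text are example values; the pattern mirrors the call sites that follow):

    // The unlock flag precedes the diagnostic UseSHA*Intrinsics flag being set.
    CommandLineOptionTest.verifyOptionValueForSameVM(
            "UseSHA1Intrinsics", "false",
            "Option 'UseSHA1Intrinsics' should be off on an unsupported CPU"
                    + " even if set to true directly",
            SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
            CommandLineOptionTest.prepareBooleanFlag("UseSHA1Intrinsics", true));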
--- a/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java	Wed May 11 00:31:28 2016 +0300
@@ -50,11 +50,13 @@
         CommandLineOptionTest.verifySameJVMStartup(null,
                 new String[] { ".*" + optionName + ".*" }, shouldPassMessage,
                 shouldPassMessage, ExitCode.OK,
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, true));
 
         CommandLineOptionTest.verifySameJVMStartup(null,
                 new String[] { ".*" + optionName + ".*" }, shouldPassMessage,
                 shouldPassMessage, ExitCode.OK,
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, false));
     }
 
@@ -63,13 +65,15 @@
         // Verify that option is disabled by default.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
                 String.format("Option '%s' should be disabled by default",
-                        optionName));
+                        optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS);
 
         // Verify that option is disabled even if it was explicitly enabled
         // using CLI options.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
                 String.format("Option '%s' should be off on unsupported "
                         + "CPU even if set to true directly", optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, true));
 
         // Verify that option is disabled when it explicitly disabled
@@ -79,6 +83,7 @@
                         + " even if '%s' flag set to JVM", optionName,
                         CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, true)),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, false));
     }
 }
--- a/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForSupportedCPU.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForSupportedCPU.java	Wed May 11 00:31:28 2016 +0300
@@ -49,6 +49,7 @@
         CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
                         SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
                 }, shouldPassMessage, shouldPassMessage, ExitCode.OK,
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, true));
 
         // Verify that option could be disabled even if +UseSHA was passed to
@@ -60,6 +61,7 @@
                         optionName, CommandLineOptionTest.prepareBooleanFlag(
                             SHAOptionsBase.USE_SHA_OPTION, true)),
                 ExitCode.OK,
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, true),
                 CommandLineOptionTest.prepareBooleanFlag(optionName, false));
@@ -75,6 +77,7 @@
                                   optionName,
                                   CommandLineOptionTest.prepareBooleanFlag(SHAOptionsBase.USE_SHA_OPTION, false)),
                     ExitCode.OK,
+                    SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                     CommandLineOptionTest.prepareBooleanFlag(SHAOptionsBase.USE_SHA_OPTION, false),
                     CommandLineOptionTest.prepareBooleanFlag(optionName, true));
         }
@@ -86,18 +89,21 @@
 
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "true",
                 String.format("Option '%s' should be enabled by default",
-                        optionName));
+                        optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS);
 
         // Verify that it is possible to explicitly enable the option.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "true",
                 String.format("Option '%s' was set to have value 'true'",
                         optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, true));
 
         // Verify that it is possible to explicitly disable the option.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
                 String.format("Option '%s' was set to have value 'false'",
                         optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, false));
 
         // verify that option is disabled when -UseSHA was passed to JVM.
@@ -106,6 +112,7 @@
                         + " flag set to JVM", optionName,
                         CommandLineOptionTest.prepareBooleanFlag(
                             SHAOptionsBase.USE_SHA_OPTION, false)),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, true),
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, false));
@@ -117,6 +124,7 @@
                         + " even if %s flag set to JVM", optionName,
                         CommandLineOptionTest.prepareBooleanFlag(
                             SHAOptionsBase.USE_SHA_OPTION, true)),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, true),
                 CommandLineOptionTest.prepareBooleanFlag(optionName, false));
--- a/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedAArch64CPU.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedAArch64CPU.java	Wed May 11 00:31:28 2016 +0300
@@ -47,6 +47,7 @@
         CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
                         SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
                 }, shouldPassMessage, shouldPassMessage, ExitCode.OK,
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, false));
 
         shouldPassMessage = String.format("If JVM is started with '-XX:-"
@@ -62,6 +63,7 @@
                     shouldPassMessage,
                     shouldPassMessage,
                     ExitCode.OK,
+                    SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                     CommandLineOptionTest.prepareBooleanFlag(SHAOptionsBase.USE_SHA_OPTION, false),
                     CommandLineOptionTest.prepareBooleanFlag(optionName, true));
         }
@@ -72,13 +74,15 @@
         // Verify that option is disabled by default.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
                 String.format("Option '%s' should be disabled by default",
-                        optionName));
+                        optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS);
 
         // Verify that option is disabled even if it was explicitly enabled
         // using CLI options.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
                 String.format("Option '%s' should be off on unsupported "
                         + "AArch64CPU even if set to true directly", optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, true));
 
         // Verify that option is disabled when +UseSHA was passed to JVM.
@@ -87,6 +91,7 @@
                         + "AArch64CPU even if %s flag set to JVM",
                         optionName, CommandLineOptionTest.prepareBooleanFlag(
                             SHAOptionsBase.USE_SHA_OPTION, true)),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, true));
     }
--- a/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedSparcCPU.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedSparcCPU.java	Wed May 11 00:31:28 2016 +0300
@@ -47,6 +47,7 @@
         CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
                         SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
                 }, shouldPassMessage, shouldPassMessage, ExitCode.OK,
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, false));
 
         // Verify that when the tested option is enabled, then
@@ -58,6 +59,7 @@
                     shouldPassMessage,
                     shouldPassMessage,
                     ExitCode.OK,
+                    SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                     CommandLineOptionTest.prepareBooleanFlag(SHAOptionsBase.USE_SHA_OPTION, false),
                     CommandLineOptionTest.prepareBooleanFlag(optionName, true));
         }
@@ -68,13 +70,15 @@
         // Verify that option is disabled by default.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
                 String.format("Option '%s' should be disabled by default",
-                        optionName));
+                        optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS);
 
         // Verify that option is disabled even if it was explicitly enabled
         // using CLI options.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
                 String.format("Option '%s' should be off on unsupported "
                         + "SparcCPU even if set to true directly", optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, true));
 
         // Verify that option is disabled when +UseSHA was passed to JVM.
@@ -83,6 +87,7 @@
                         + "SparcCPU even if %s flag set to JVM",
                         optionName, CommandLineOptionTest.prepareBooleanFlag(
                             SHAOptionsBase.USE_SHA_OPTION, true)),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, true));
     }
--- a/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedX86CPU.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedX86CPU.java	Wed May 11 00:31:28 2016 +0300
@@ -49,6 +49,7 @@
         CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
                         SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
                 }, shouldPassMessage, shouldPassMessage, ExitCode.OK,
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, false));
 
         // Verify that when the tested option is enabled, then
@@ -60,6 +61,7 @@
                     shouldPassMessage,
                     shouldPassMessage,
                     ExitCode.OK,
+                    SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                     CommandLineOptionTest.prepareBooleanFlag(SHAOptionsBase.USE_SHA_OPTION, false),
                     CommandLineOptionTest.prepareBooleanFlag(optionName, true));
         }
@@ -70,12 +72,14 @@
         // Verify that the tested option is disabled by default.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
                 String.format("Option '%s' should be disabled by default",
-                        optionName));
+                        optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS);
 
         // Verify that it is not possible to explicitly enable the option.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
                 String.format("Option '%s' should be off on unsupported "
                         + "X86CPU even if set to true directly", optionName),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, true));
 
         // Verify that the tested option is disabled even if +UseSHA was passed
@@ -85,6 +89,7 @@
                         + "X86CPU even if %s flag set to JVM",
                         optionName, CommandLineOptionTest.prepareBooleanFlag(
                             SHAOptionsBase.USE_SHA_OPTION, true)),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, true));
     }
--- a/hotspot/test/compiler/intrinsics/sha/cli/testcases/UseSHAIntrinsicsSpecificTestCaseForUnsupportedCPU.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/intrinsics/sha/cli/testcases/UseSHAIntrinsicsSpecificTestCaseForUnsupportedCPU.java	Wed May 11 00:31:28 2016 +0300
@@ -59,6 +59,7 @@
         CommandLineOptionTest.verifySameJVMStartup(new String[] {
                         SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
                 }, null, shouldPassMessage, shouldPassMessage, ExitCode.OK,
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, true));
     }
 }
--- a/hotspot/test/compiler/intrinsics/sha/cli/testcases/UseSHASpecificTestCaseForSupportedCPU.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/intrinsics/sha/cli/testcases/UseSHASpecificTestCaseForSupportedCPU.java	Wed May 11 00:31:28 2016 +0300
@@ -57,6 +57,7 @@
         CommandLineOptionTest.verifySameJVMStartup(
                 null, new String[] { ".*UseSHA.*" }, shouldPassMessage,
                 shouldPassMessage, ExitCode.OK,
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, true),
                 CommandLineOptionTest.prepareBooleanFlag(
@@ -75,6 +76,7 @@
                 SHAOptionsBase.USE_SHA_OPTION, "false", String.format(
                 "'%s' option should be disabled when all UseSHA*Intrinsics are"
                         + " disabled", SHAOptionsBase.USE_SHA_OPTION),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, false),
                 CommandLineOptionTest.prepareBooleanFlag(
@@ -91,6 +93,7 @@
                         + "to JVM", SHAOptionsBase.USE_SHA_OPTION,
                         CommandLineOptionTest.prepareBooleanFlag(
                              SHAOptionsBase.USE_SHA_OPTION, true)),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, true),
                 CommandLineOptionTest.prepareBooleanFlag(
@@ -109,6 +112,7 @@
                         SHAOptionsBase.USE_SHA_OPTION,
                         CommandLineOptionTest.prepareBooleanFlag(
                             SHAOptionsBase.USE_SHA_OPTION, false)),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, false),
                 CommandLineOptionTest.prepareBooleanFlag(
--- a/hotspot/test/compiler/intrinsics/sha/cli/testcases/UseSHASpecificTestCaseForUnsupportedCPU.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/intrinsics/sha/cli/testcases/UseSHASpecificTestCaseForUnsupportedCPU.java	Wed May 11 00:31:28 2016 +0300
@@ -68,6 +68,7 @@
                     "%s option should be disabled on unsupported CPU"
                         + " even if all UseSHA*Intrinsics options were enabled.",
                     SHAOptionsBase.USE_SHA_OPTION),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, true),
                 CommandLineOptionTest.prepareBooleanFlag(
@@ -84,6 +85,7 @@
                         + " and %s was enabled as well",
                     SHAOptionsBase.USE_SHA_OPTION,
                     SHAOptionsBase.USE_SHA_OPTION),
+                SHAOptionsBase.UNLOCK_DIAGNOSTIC_VM_OPTIONS,
                 CommandLineOptionTest.prepareBooleanFlag(
                         SHAOptionsBase.USE_SHA_OPTION, true),
                 CommandLineOptionTest.prepareBooleanFlag(
--- a/hotspot/test/compiler/runtime/6859338/Test6859338.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/runtime/6859338/Test6859338.java	Wed May 11 00:31:28 2016 +0300
@@ -27,7 +27,7 @@
  * @bug 6859338
  * @summary Assertion failure in sharedRuntime.cpp
  *
- * @run main/othervm -Xcomp -XX:+IgnoreUnrecognizedVMOptions  -XX:-InlineObjectHash -Xbatch -XX:-ProfileInterpreter Test6859338
+ * @run main/othervm -Xcomp -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:-InlineObjectHash -Xbatch -XX:-ProfileInterpreter Test6859338
  */
 
 public class Test6859338 {
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestInt.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestInt.java	Wed May 11 00:31:28 2016 +0300
@@ -281,6 +281,18 @@
             assertEquals(x, 2, "weakCompareAndSwapRelease int");
         }
 
+        {
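+            // Weak CAS may fail spuriously, so retry (bounded by WEAK_ATTEMPTS).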
+            boolean success = false;
+            for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) {
+                success = UNSAFE.weakCompareAndSwapIntVolatile(base, offset, 2, 1);
+            }
+            assertEquals(success, true, "weakCompareAndSwapVolatile int");
+            int x = UNSAFE.getInt(base, offset);
+            assertEquals(x, 1, "weakCompareAndSwapVolatile int");
+        }
+
+        UNSAFE.putInt(base, offset, 2);
+
         // Compare set and get
         {
             int o = UNSAFE.getAndSetInt(base, offset, 1);
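A note on the block added above: weakCompareAndSwapIntVolatile is allowed to fail spuriously even when the field holds the expected value, so the test retries it up to WEAK_ATTEMPTS times, and the trailing UNSAFE.putInt(base, offset, 2) puts the field back to the value the following "Compare set and get" block expects. The retry idiom, factored into a helper (the helper name is invented here; UNSAFE and WEAK_ATTEMPTS are the test's existing fields):

    // Invented helper illustrating the bounded-retry idiom used above.
    static boolean weakCasIntWithRetry(Object base, long offset, int expected, int update) {
        boolean success = false;
        for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) {
            // A weak CAS may return false spuriously; loop until it succeeds
            // or the attempt budget is exhausted.
            success = UNSAFE.weakCompareAndSwapIntVolatile(base, offset, expected, update);
        }
        return success;
    }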
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestLong.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestLong.java	Wed May 11 00:31:28 2016 +0300
@@ -281,6 +281,18 @@
             assertEquals(x, 2L, "weakCompareAndSwapRelease long");
         }
 
+        {
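+            // Weak CAS may fail spuriously, so retry (bounded by WEAK_ATTEMPTS).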
+            boolean success = false;
+            for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) {
+                success = UNSAFE.weakCompareAndSwapLongVolatile(base, offset, 2L, 1L);
+            }
+            assertEquals(success, true, "weakCompareAndSwapVolatile long");
+            long x = UNSAFE.getLong(base, offset);
+            assertEquals(x, 1L, "weakCompareAndSwapVolatile long");
+        }
+
+        UNSAFE.putLong(base, offset, 2L);
+
         // Compare set and get
         {
             long o = UNSAFE.getAndSetLong(base, offset, 1L);
--- a/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestObject.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/unsafe/JdkInternalMiscUnsafeAccessTestObject.java	Wed May 11 00:31:28 2016 +0300
@@ -234,6 +234,18 @@
             assertEquals(x, "bar", "weakCompareAndSwapRelease Object");
         }
 
+        {
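+            // Weak CAS may fail spuriously, so retry (bounded by WEAK_ATTEMPTS).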
+            boolean success = false;
+            for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) {
+                success = UNSAFE.weakCompareAndSwapObjectVolatile(base, offset, "bar", "foo");
+            }
+            assertEquals(success, true, "weakCompareAndSwapVolatile Object");
+            Object x = UNSAFE.getObject(base, offset);
+            assertEquals(x, "foo", "weakCompareAndSwapVolatile Object");
+        }
+
+        UNSAFE.putObject(base, offset, "bar");
+
         // Compare set and get
         {
             Object o = UNSAFE.getAndSetObject(base, offset, "foo");
--- a/hotspot/test/compiler/unsafe/SunMiscUnsafeAccessTestInt.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/unsafe/SunMiscUnsafeAccessTestInt.java	Wed May 11 00:31:28 2016 +0300
@@ -183,6 +183,7 @@
             assertEquals(x, 2, "failing compareAndSwap int value");
         }
 
+        UNSAFE.putInt(base, offset, 2);
 
         // Compare set and get
         {
--- a/hotspot/test/compiler/unsafe/SunMiscUnsafeAccessTestLong.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/unsafe/SunMiscUnsafeAccessTestLong.java	Wed May 11 00:31:28 2016 +0300
@@ -183,6 +183,7 @@
             assertEquals(x, 2L, "failing compareAndSwap long value");
         }
 
+        UNSAFE.putLong(base, offset, 2L);
 
         // Compare set and get
         {
--- a/hotspot/test/compiler/unsafe/SunMiscUnsafeAccessTestObject.java	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/unsafe/SunMiscUnsafeAccessTestObject.java	Wed May 11 00:31:28 2016 +0300
@@ -154,6 +154,7 @@
             assertEquals(x, "bar", "failing compareAndSwap Object value");
         }
 
+        UNSAFE.putObject(base, offset, "bar");
 
         // Compare set and get
         {
--- a/hotspot/test/compiler/unsafe/X-UnsafeAccessTest.java.template	Mon May 09 15:46:12 2016 +0200
+++ b/hotspot/test/compiler/unsafe/X-UnsafeAccessTest.java.template	Wed May 11 00:31:28 2016 +0300
@@ -302,7 +302,19 @@
             $type$ x = UNSAFE.get$Type$(base, offset);
             assertEquals(x, $value2$, "weakCompareAndSwapRelease $type$");
         }
+
+        {
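+            // Weak CAS may fail spuriously, so retry (bounded by WEAK_ATTEMPTS).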
+            boolean success = false;
+            for (int c = 0; c < WEAK_ATTEMPTS && !success; c++) {
+                success = UNSAFE.weakCompareAndSwap$Type$Volatile(base, offset, $value2$, $value1$);
+            }
+            assertEquals(success, true, "weakCompareAndSwapVolatile $type$");
+            $type$ x = UNSAFE.get$Type$(base, offset);
+            assertEquals(x, $value1$, "weakCompareAndSwapVolatile $type$");
+        }
+
 #end[JdkInternalMisc]
+        UNSAFE.put$Type$(base, offset, $value2$);
 
         // Compare set and get
         {