Merge
author dcubed
Thu, 02 Jul 2015 14:20:36 -0700
changeset 31627 a7141b9a8e0f
parent 31523 b445c291bae3 (diff)
parent 31626 2be1dcd3b6ec (current diff)
child 31628 61d58c8bd374
Merge
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/stubRoutines.cpp
hotspot/src/share/vm/runtime/vmStructs.cpp
hotspot/test/TEST.groups
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad	Thu Jul 02 14:20:36 2015 -0700
@@ -865,6 +865,42 @@
     V31, V31_H
 );
 
+// Class for all 64bit vector registers
+reg_class vectord_reg(
+    V0, V0_H,
+    V1, V1_H,
+    V2, V2_H,
+    V3, V3_H,
+    V4, V4_H,
+    V5, V5_H,
+    V6, V6_H,
+    V7, V7_H,
+    V8, V8_H,
+    V9, V9_H,
+    V10, V10_H,
+    V11, V11_H,
+    V12, V12_H,
+    V13, V13_H,
+    V14, V14_H,
+    V15, V15_H,
+    V16, V16_H,
+    V17, V17_H,
+    V18, V18_H,
+    V19, V19_H,
+    V20, V20_H,
+    V21, V21_H,
+    V22, V22_H,
+    V23, V23_H,
+    V24, V24_H,
+    V25, V25_H,
+    V26, V26_H,
+    V27, V27_H,
+    V28, V28_H,
+    V29, V29_H,
+    V30, V30_H,
+    V31, V31_H
+);
+
 // Class for all 128bit vector registers
 reg_class vectorx_reg(
     V0, V0_H, V0_J, V0_K,
@@ -2133,40 +2169,48 @@
 
   if (bottom_type()->isa_vect() != NULL) {
     uint len = 4;
+    uint ireg = ideal_reg();
+    assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
     if (cbuf) {
       MacroAssembler _masm(cbuf);
-      uint ireg = ideal_reg();
       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
-      assert(ireg == Op_VecX, "sanity");
       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
         // stack->stack
         int src_offset = ra_->reg2offset(src_lo);
         int dst_offset = ra_->reg2offset(dst_lo);
        assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
         len = 8;
-        if (src_offset < 512) {
-          __ ldp(rscratch1, rscratch2, Address(sp, src_offset));
-        } else {
+        if (ireg == Op_VecD) {
           __ ldr(rscratch1, Address(sp, src_offset));
-          __ ldr(rscratch2, Address(sp, src_offset+4));
-          len += 4;
-        }
-        if (dst_offset < 512) {
-          __ stp(rscratch1, rscratch2, Address(sp, dst_offset));
+          __ str(rscratch1, Address(sp, dst_offset));
         } else {
-          __ str(rscratch1, Address(sp, dst_offset));
-          __ str(rscratch2, Address(sp, dst_offset+4));
-          len += 4;
+          if (src_offset < 512) {
+            __ ldp(rscratch1, rscratch2, Address(sp, src_offset));
+          } else {
+            __ ldr(rscratch1, Address(sp, src_offset));
+            __ ldr(rscratch2, Address(sp, src_offset+4));
+            len += 4;
+          }
+          if (dst_offset < 512) {
+            __ stp(rscratch1, rscratch2, Address(sp, dst_offset));
+          } else {
+            __ str(rscratch1, Address(sp, dst_offset));
+            __ str(rscratch2, Address(sp, dst_offset+4));
+            len += 4;
+          }
         }
       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
-        __ orr(as_FloatRegister(Matcher::_regEncode[dst_lo]), __ T16B,
+        __ orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
+               ireg == Op_VecD ? __ T8B : __ T16B,
                as_FloatRegister(Matcher::_regEncode[src_lo]),
                as_FloatRegister(Matcher::_regEncode[src_lo]));
       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
-        __ str(as_FloatRegister(Matcher::_regEncode[src_lo]), __ Q,
+        __ str(as_FloatRegister(Matcher::_regEncode[src_lo]),
+               ireg == Op_VecD ? __ D : __ Q,
                Address(sp, ra_->reg2offset(dst_lo)));
       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
-        __ ldr(as_FloatRegister(Matcher::_regEncode[dst_lo]), __ Q,
+        __ ldr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
+               ireg == Op_VecD ? __ D : __ Q,
                Address(sp, ra_->reg2offset(src_lo)));
       } else {
         ShouldNotReachHere();
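
A minimal standalone sketch (plain C++, not HotSpot code) of the spill-length arithmetic the reworked stack-to-stack path above computes, assuming the 512-byte bound is the limit of the ldp/stp offset form used here:

```cpp
#include <cassert>

// A 64-bit (Op_VecD) copy is always one ldr + one str (8 bytes of code);
// a 128-bit (Op_VecX) copy starts as ldp + stp and grows by 4 bytes for
// each side whose offset falls outside the assumed ldp/stp range.
static int vec_spill_code_bytes(bool is_vecd, int src_offset, int dst_offset) {
  if (is_vecd) {
    return 8;                       // ldr rscratch1 + str rscratch1
  }
  int len = 8;                      // ldp + stp
  if (src_offset >= 512) len += 4;  // two ldrs instead of one ldp
  if (dst_offset >= 512) len += 4;  // two strs instead of one stp
  return len;
}

int main() {
  assert(vec_spill_code_bytes(true, 1024, 1024) == 8);
  assert(vec_spill_code_bytes(false,  16,   16) == 8);
  assert(vec_spill_code_bytes(false, 600,   16) == 12);
  assert(vec_spill_code_bytes(false, 600,  600) == 16);
  return 0;
}
```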
@@ -2176,17 +2220,22 @@
         // stack->stack
         int src_offset = ra_->reg2offset(src_lo);
         int dst_offset = ra_->reg2offset(dst_lo);
-        if (src_offset < 512) {
-          st->print("ldp  rscratch1, rscratch2, [sp, #%d]", src_offset);
-        } else {
+        if (ireg == Op_VecD) {
           st->print("ldr  rscratch1, [sp, #%d]", src_offset);
-          st->print("\nldr  rscratch2, [sp, #%d]", src_offset+4);
-        }
-        if (dst_offset < 512) {
-          st->print("\nstp  rscratch1, rscratch2, [sp, #%d]", dst_offset);
+          st->print("str  rscratch1, [sp, #%d]", dst_offset);
         } else {
-          st->print("\nstr  rscratch1, [sp, #%d]", dst_offset);
-          st->print("\nstr  rscratch2, [sp, #%d]", dst_offset+4);
+          if (src_offset < 512) {
+            st->print("ldp  rscratch1, rscratch2, [sp, #%d]", src_offset);
+          } else {
+            st->print("ldr  rscratch1, [sp, #%d]", src_offset);
+            st->print("\nldr  rscratch2, [sp, #%d]", src_offset+4);
+          }
+          if (dst_offset < 512) {
+            st->print("\nstp  rscratch1, rscratch2, [sp, #%d]", dst_offset);
+          } else {
+            st->print("\nstr  rscratch1, [sp, #%d]", dst_offset);
+            st->print("\nstr  rscratch2, [sp, #%d]", dst_offset+4);
+          }
         }
         st->print("\t# vector spill, stack to stack");
       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
@@ -2638,17 +2687,22 @@
   return vector_width_in_bytes(bt)/type2aelembytes(bt);
 }
 const int Matcher::min_vector_size(const BasicType bt) {
-  //return (type2aelembytes(bt) == 1) ? 4 : 2;
-  // For the moment, only support 1 vector size, 128 bits
-  return max_vector_size(bt);
+  // For the moment limit the vector size to 8 bytes
+  int size = 8 / type2aelembytes(bt);
+  if (size < 2) size = 2;
+  return size;
 }
 
 // Vector ideal reg.
 const int Matcher::vector_ideal_reg(int len) {
-  return Op_VecX;
+  switch(len) {
+    case  8: return Op_VecD;
+    case 16: return Op_VecX;
+  }
+  ShouldNotReachHere();
+  return 0;
 }
 
-// Only lowest bits of xmm reg are used for vector shift count.
 const int Matcher::vector_shift_count_ideal_reg(int size) {
   return Op_VecX;
 }
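
For reference, a standalone sketch (plain C++, not HotSpot code; `elem_bytes` is a hypothetical stand-in for type2aelembytes) of what the new minimum works out to per element type, and how the lane counts feed vector_ideal_reg above:

```cpp
#include <cassert>

// Hypothetical stand-in for type2aelembytes(): element size in bytes.
static int elem_bytes(char kind) {
  switch (kind) {
    case 'B': return 1;  // byte
    case 'S': return 2;  // short
    case 'I': return 4;  // int
    case 'L': return 8;  // long
  }
  return 0;
}

// Mirrors min_vector_size() above: at most 8 bytes, at least 2 lanes.
static int min_vector_size_sketch(char kind) {
  int size = 8 / elem_bytes(kind);
  if (size < 2) size = 2;
  return size;
}

int main() {
  assert(min_vector_size_sketch('B') == 8);  // 8 x 1 byte  ->  8 bytes, VecD
  assert(min_vector_size_sketch('I') == 2);  // 2 x 4 bytes ->  8 bytes, VecD
  assert(min_vector_size_sketch('L') == 2);  // 2 x 8 bytes -> 16 bytes, VecX
  return 0;
}
```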
@@ -2660,9 +2714,7 @@
 
 // AArch64 supports misaligned vector store/load.
 const bool Matcher::misaligned_vectors_ok() {
-  // TODO fixme
-  // return !AlignVector; // can be changed by flag
-  return false;
+  return !AlignVector; // can be changed by flag
 }
 
 // false => size gets scaled to BytesPerLong, ok.
@@ -3073,13 +3125,13 @@
                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
   %}
 
-  enc_class aarch64_enc_ldrvS(vecX dst, memory mem) %{
+  enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{
     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
   %}
 
-  enc_class aarch64_enc_ldrvD(vecX dst, memory mem) %{
+  enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{
     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
     loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
@@ -3159,13 +3211,13 @@
                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
   %}
 
-  enc_class aarch64_enc_strvS(vecX src, memory mem) %{
+  enc_class aarch64_enc_strvS(vecD src, memory mem) %{
     FloatRegister src_reg = as_FloatRegister($src$$reg);
     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
   %}
 
-  enc_class aarch64_enc_strvD(vecX src, memory mem) %{
+  enc_class aarch64_enc_strvD(vecD src, memory mem) %{
     FloatRegister src_reg = as_FloatRegister($src$$reg);
     loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
        $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
@@ -5187,6 +5239,16 @@
   interface(REG_INTER);
 %}
 
+operand vecD()
+%{
+  constraint(ALLOC_IN_RC(vectord_reg));
+  match(VecD);
+
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 operand vecX()
 %{
   constraint(ALLOC_IN_RC(vectorx_reg));
@@ -7402,6 +7464,96 @@
   ins_pipe(ialu_reg);
 %}
 
+//---------- Population Count Instructions -------------------------------------
+//
+
+instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
+  predicate(UsePopCountInstruction);
+  match(Set dst (PopCountI src));
+  effect(TEMP tmp);
+  ins_cost(INSN_COST * 13);
+
+  format %{ "movw   $src, $src\n\t"
+            "mov    $tmp, $src\t# vector (1D)\n\t"
+            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
+            "addv   $tmp, $tmp\t# vector (8B)\n\t"
+            "mov    $dst, $tmp\t# vector (1D)" %}
+  ins_encode %{
+    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
+    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
+    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
+    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
+    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{
+  predicate(UsePopCountInstruction);
+  match(Set dst (PopCountI (LoadI mem)));
+  effect(TEMP tmp);
+  ins_cost(INSN_COST * 13);
+
+  format %{ "ldrs   $tmp, $mem\n\t"
+            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
+            "addv   $tmp, $tmp\t# vector (8B)\n\t"
+            "mov    $dst, $tmp\t# vector (1D)" %}
+  ins_encode %{
+    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
+    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
+    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+// Note: Long.bitCount(long) returns an int.
+instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
+  predicate(UsePopCountInstruction);
+  match(Set dst (PopCountL src));
+  effect(TEMP tmp);
+  ins_cost(INSN_COST * 13);
+
+  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
+            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
+            "addv   $tmp, $tmp\t# vector (8B)\n\t"
+            "mov    $dst, $tmp\t# vector (1D)" %}
+  ins_encode %{
+    __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register);
+    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
+    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
+    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{
+  predicate(UsePopCountInstruction);
+  match(Set dst (PopCountL (LoadL mem)));
+  effect(TEMP tmp);
+  ins_cost(INSN_COST * 13);
+
+  format %{ "ldrd   $tmp, $mem\n\t"
+            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
+            "addv   $tmp, $tmp\t# vector (8B)\n\t"
+            "mov    $dst, $tmp\t# vector (1D)" %}
+  ins_encode %{
+    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
+    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
+    __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
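The four rules above all funnel through the same NEON idiom: move the value into a vector register, CNT for per-byte popcounts, ADDV to sum the byte lanes, then move the result back to a GPR. A plain C++ sketch (not HotSpot code) of what that pair of instructions computes:

```cpp
#include <cstdint>
#include <cassert>

// Emulates CNT Vt.8B followed by ADDV Bt, Vt.8B on a 64-bit value:
// count the set bits in each byte lane, then add the eight lane counts.
static int popcount_8b_addv(uint64_t x) {
  int total = 0;
  for (int lane = 0; lane < 8; lane++) {
    uint8_t b = (uint8_t)(x >> (8 * lane));
    while (b) { total += b & 1; b >>= 1; }  // per-lane CNT
  }
  return total;                             // ADDV: horizontal sum
}

int main() {
  assert(popcount_8b_addv(0) == 0);
  assert(popcount_8b_addv(0xffffffffffffffffULL) == 64);
  assert(popcount_8b_addv(0x0101010101010101ULL) == 8);
  return 0;
}
```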
 // ============================================================================
 // MemBar Instruction
 
@@ -13194,7 +13346,7 @@
 // ====================VECTOR INSTRUCTIONS=====================================
 
 // Load vector (32 bits)
-instruct loadV4(vecX dst, vmem mem)
+instruct loadV4(vecD dst, vmem mem)
 %{
   predicate(n->as_LoadVector()->memory_size() == 4);
   match(Set dst (LoadVector mem));
@@ -13205,7 +13357,7 @@
 %}
 
 // Load vector (64 bits)
-instruct loadV8(vecX dst, vmem mem)
+instruct loadV8(vecD dst, vmem mem)
 %{
   predicate(n->as_LoadVector()->memory_size() == 8);
   match(Set dst (LoadVector mem));
@@ -13227,7 +13379,7 @@
 %}
 
 // Store Vector (32 bits)
-instruct storeV4(vecX src, vmem mem)
+instruct storeV4(vecD src, vmem mem)
 %{
   predicate(n->as_StoreVector()->memory_size() == 4);
   match(Set mem (StoreVector mem src));
@@ -13238,7 +13390,7 @@
 %}
 
 // Store Vector (64 bits)
-instruct storeV8(vecX src, vmem mem)
+instruct storeV8(vecD src, vmem mem)
 %{
   predicate(n->as_StoreVector()->memory_size() == 8);
   match(Set mem (StoreVector mem src));
@@ -13259,8 +13411,22 @@
   ins_pipe(pipe_class_memory);
 %}
 
+instruct replicate8B(vecD dst, iRegIorL2I src)
+%{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (ReplicateB src));
+  ins_cost(INSN_COST);
+  format %{ "dup  $dst, $src\t# vector (8B)" %}
+  ins_encode %{
+    __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct replicate16B(vecX dst, iRegIorL2I src)
 %{
+  predicate(n->as_Vector()->length() == 16);
   match(Set dst (ReplicateB src));
   ins_cost(INSN_COST);
   format %{ "dup  $dst, $src\t# vector (16B)" %}
@@ -13270,19 +13436,47 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct replicate8B_imm(vecD dst, immI con)
+%{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (ReplicateB con));
+  ins_cost(INSN_COST);
+  format %{ "movi  $dst, $con\t# vector(8B)" %}
+  ins_encode %{
+    __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct replicate16B_imm(vecX dst, immI con)
 %{
+  predicate(n->as_Vector()->length() == 16);
   match(Set dst (ReplicateB con));
   ins_cost(INSN_COST);
   format %{ "movi  $dst, $con\t# vector(16B)" %}
   ins_encode %{
-    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant);
+    __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct replicate4S(vecD dst, iRegIorL2I src)
+%{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (ReplicateS src));
+  ins_cost(INSN_COST);
+  format %{ "dup  $dst, $src\t# vector (4S)" %}
+  ins_encode %{
+    __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg));
   %}
   ins_pipe(pipe_class_default);
 %}
 
 instruct replicate8S(vecX dst, iRegIorL2I src)
 %{
+  predicate(n->as_Vector()->length() == 8);
   match(Set dst (ReplicateS src));
   ins_cost(INSN_COST);
   format %{ "dup  $dst, $src\t# vector (8S)" %}
@@ -13292,19 +13486,46 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct replicate4S_imm(vecD dst, immI con)
+%{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (ReplicateS con));
+  ins_cost(INSN_COST);
+  format %{ "movi  $dst, $con\t# vector(4H)" %}
+  ins_encode %{
+    __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct replicate8S_imm(vecX dst, immI con)
 %{
+  predicate(n->as_Vector()->length() == 8);
   match(Set dst (ReplicateS con));
   ins_cost(INSN_COST);
   format %{ "movi  $dst, $con\t# vector(8H)" %}
   ins_encode %{
-    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant);
+    __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct replicate2I(vecD dst, iRegIorL2I src)
+%{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (ReplicateI src));
+  ins_cost(INSN_COST);
+  format %{ "dup  $dst, $src\t# vector (2I)" %}
+  ins_encode %{
+    __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg));
   %}
   ins_pipe(pipe_class_default);
 %}
 
 instruct replicate4I(vecX dst, iRegIorL2I src)
 %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (ReplicateI src));
   ins_cost(INSN_COST);
   format %{ "dup  $dst, $src\t# vector (4I)" %}
@@ -13314,8 +13535,21 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct replicate2I_imm(vecD dst, immI con)
+%{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (ReplicateI con));
+  ins_cost(INSN_COST);
+  format %{ "movi  $dst, $con\t# vector(2I)" %}
+  ins_encode %{
+    __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct replicate4I_imm(vecX dst, immI con)
 %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (ReplicateI con));
   ins_cost(INSN_COST);
   format %{ "movi  $dst, $con\t# vector(4I)" %}
@@ -13327,6 +13561,7 @@
 
 instruct replicate2L(vecX dst, iRegL src)
 %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (ReplicateL src));
   ins_cost(INSN_COST);
   format %{ "dup  $dst, $src\t# vector (2L)" %}
@@ -13338,6 +13573,7 @@
 
 instruct replicate2L_zero(vecX dst, immI0 zero)
 %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (ReplicateI zero));
   ins_cost(INSN_COST);
   format %{ "movi  $dst, $zero\t# vector(4I)" %}
@@ -13349,8 +13585,22 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct replicate2F(vecD dst, vRegF src)
+%{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (ReplicateF src));
+  ins_cost(INSN_COST);
+  format %{ "dup  $dst, $src\t# vector (2F)" %}
+  ins_encode %{
+    __ dup(as_FloatRegister($dst$$reg), __ T2S,
+           as_FloatRegister($src$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct replicate4F(vecX dst, vRegF src)
 %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (ReplicateF src));
   ins_cost(INSN_COST);
   format %{ "dup  $dst, $src\t# vector (4F)" %}
@@ -13363,6 +13613,7 @@
 
 instruct replicate2D(vecX dst, vRegD src)
 %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (ReplicateD src));
   ins_cost(INSN_COST);
   format %{ "dup  $dst, $src\t# vector (2D)" %}
@@ -13375,6 +13626,25 @@
 
 // ====================REDUCTION ARITHMETIC====================================
 
+instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp, iRegI tmp2)
+%{
+  match(Set dst (AddReductionVI src1 src2));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp, TEMP tmp2);
+  format %{ "umov  $tmp, $src2, S, 0\n\t"
+            "umov  $tmp2, $src2, S, 1\n\t"
+            "addw  $dst, $src1, $tmp\n\t"
+            "addw  $dst, $dst, $tmp2\t add reduction2i"
+  %}
+  ins_encode %{
+    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
+    __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1);
+    __ addw($dst$$Register, $src1$$Register, $tmp$$Register);
+    __ addw($dst$$Register, $dst$$Register, $tmp2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
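A minimal sketch (plain C++, not the generated code) of the dataflow in reduce_add2I above, two umov lane extracts followed by two 32-bit adds:

```cpp
#include <cstdint>
#include <cassert>

// dst = src1 + src2[0] + src2[1], matching the umov/umov/addw/addw
// sequence emitted above.
static int32_t reduce_add2I_sketch(int32_t src1, const int32_t src2[2]) {
  int32_t tmp  = src2[0];           // umov tmp,  src2.S[0]
  int32_t tmp2 = src2[1];           // umov tmp2, src2.S[1]
  int32_t dst  = src1 + tmp;        // addw dst, src1, tmp
  return dst + tmp2;                // addw dst, dst, tmp2
}

int main() {
  const int32_t v[2] = { 3, -1 };
  assert(reduce_add2I_sketch(10, v) == 12);
  return 0;
}
```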
 instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
 %{
   match(Set dst (AddReductionVI src1 src2));
@@ -13393,6 +13663,25 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegI tmp)
+%{
+  match(Set dst (MulReductionVI src1 src2));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp, TEMP dst);
+  format %{ "umov  $tmp, $src2, S, 0\n\t"
+            "mul   $dst, $tmp, $src1\n\t"
+            "umov  $tmp, $src2, S, 1\n\t"
+            "mul   $dst, $tmp, $dst\t mul reduction2i\n\t"
+  %}
+  ins_encode %{
+    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0);
+    __ mul($dst$$Register, $tmp$$Register, $src1$$Register);
+    __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1);
+    __ mul($dst$$Register, $tmp$$Register, $dst$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegI tmp2)
 %{
   match(Set dst (MulReductionVI src1 src2));
@@ -13418,6 +13707,26 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
+%{
+  match(Set dst (AddReductionVF src1 src2));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp, TEMP dst);
+  format %{ "fadds $dst, $src1, $src2\n\t"
+            "ins   $tmp, S, $src2, 0, 1\n\t"
+            "fadds $dst, $dst, $tmp\t add reduction2f"
+  %}
+  ins_encode %{
+    __ fadds(as_FloatRegister($dst$$reg),
+             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
+    __ ins(as_FloatRegister($tmp$$reg), __ S,
+           as_FloatRegister($src2$$reg), 0, 1);
+    __ fadds(as_FloatRegister($dst$$reg),
+             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
 %{
   match(Set dst (AddReductionVF src1 src2));
@@ -13450,6 +13759,26 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp)
+%{
+  match(Set dst (MulReductionVF src1 src2));
+  ins_cost(INSN_COST);
+  effect(TEMP tmp, TEMP dst);
+  format %{ "fmuls $dst, $src1, $src2\n\t"
+            "ins   $tmp, S, $src2, 0, 1\n\t"
+            "fmuls $dst, $dst, $tmp\t add reduction4f"
+  %}
+  ins_encode %{
+    __ fmuls(as_FloatRegister($dst$$reg),
+             as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
+    __ ins(as_FloatRegister($tmp$$reg), __ S,
+           as_FloatRegister($src2$$reg), 0, 1);
+    __ fmuls(as_FloatRegister($dst$$reg),
+             as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp)
 %{
   match(Set dst (MulReductionVF src1 src2));
@@ -13526,8 +13855,24 @@
 
 // --------------------------------- ADD --------------------------------------
 
+instruct vadd8B(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (AddVB src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "addv  $dst,$src1,$src2\t# vector (8B)" %}
+  ins_encode %{
+    __ addv(as_FloatRegister($dst$$reg), __ T8B,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vadd16B(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 16);
   match(Set dst (AddVB src1 src2));
   ins_cost(INSN_COST);
   format %{ "addv  $dst,$src1,$src2\t# vector (16B)" %}
@@ -13539,8 +13884,24 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vadd4S(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (AddVS src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "addv  $dst,$src1,$src2\t# vector (4H)" %}
+  ins_encode %{
+    __ addv(as_FloatRegister($dst$$reg), __ T4H,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vadd8S(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 8);
   match(Set dst (AddVS src1 src2));
   ins_cost(INSN_COST);
   format %{ "addv  $dst,$src1,$src2\t# vector (8H)" %}
@@ -13552,8 +13913,23 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vadd2I(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (AddVI src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "addv  $dst,$src1,$src2\t# vector (2S)" %}
+  ins_encode %{
+    __ addv(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vadd4I(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (AddVI src1 src2));
   ins_cost(INSN_COST);
   format %{ "addv  $dst,$src1,$src2\t# vector (4S)" %}
@@ -13567,6 +13943,7 @@
 
 instruct vadd2L(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (AddVL src1 src2));
   ins_cost(INSN_COST);
   format %{ "addv  $dst,$src1,$src2\t# vector (2L)" %}
@@ -13578,8 +13955,23 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vadd2F(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (AddVF src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "fadd  $dst,$src1,$src2\t# vector (2S)" %}
+  ins_encode %{
+    __ fadd(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vadd4F(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (AddVF src1 src2));
   ins_cost(INSN_COST);
   format %{ "fadd  $dst,$src1,$src2\t# vector (4S)" %}
@@ -13606,8 +13998,24 @@
 
 // --------------------------------- SUB --------------------------------------
 
+instruct vsub8B(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (SubVB src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "subv  $dst,$src1,$src2\t# vector (8B)" %}
+  ins_encode %{
+    __ subv(as_FloatRegister($dst$$reg), __ T8B,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsub16B(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 16);
   match(Set dst (SubVB src1 src2));
   ins_cost(INSN_COST);
   format %{ "subv  $dst,$src1,$src2\t# vector (16B)" %}
@@ -13619,8 +14027,24 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsub4S(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (SubVS src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "subv  $dst,$src1,$src2\t# vector (4H)" %}
+  ins_encode %{
+    __ subv(as_FloatRegister($dst$$reg), __ T4H,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsub8S(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 8);
   match(Set dst (SubVS src1 src2));
   ins_cost(INSN_COST);
   format %{ "subv  $dst,$src1,$src2\t# vector (8H)" %}
@@ -13632,8 +14056,23 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsub2I(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (SubVI src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "subv  $dst,$src1,$src2\t# vector (2S)" %}
+  ins_encode %{
+    __ subv(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsub4I(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (SubVI src1 src2));
   ins_cost(INSN_COST);
   format %{ "subv  $dst,$src1,$src2\t# vector (4S)" %}
@@ -13647,6 +14086,7 @@
 
 instruct vsub2L(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (SubVL src1 src2));
   ins_cost(INSN_COST);
   format %{ "subv  $dst,$src1,$src2\t# vector (2L)" %}
@@ -13658,8 +14098,23 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsub2F(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (SubVF src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "fsub  $dst,$src1,$src2\t# vector (2S)" %}
+  ins_encode %{
+    __ fsub(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsub4F(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (SubVF src1 src2));
   ins_cost(INSN_COST);
   format %{ "fsub  $dst,$src1,$src2\t# vector (4S)" %}
@@ -13673,6 +14128,7 @@
 
 instruct vsub2D(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (SubVD src1 src2));
   ins_cost(INSN_COST);
   format %{ "fsub  $dst,$src1,$src2\t# vector (2D)" %}
@@ -13686,8 +14142,24 @@
 
 // --------------------------------- MUL --------------------------------------
 
+instruct vmul4S(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (MulVS src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "mulv  $dst,$src1,$src2\t# vector (4H)" %}
+  ins_encode %{
+    __ mulv(as_FloatRegister($dst$$reg), __ T4H,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vmul8S(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 8);
   match(Set dst (MulVS src1 src2));
   ins_cost(INSN_COST);
   format %{ "mulv  $dst,$src1,$src2\t# vector (8H)" %}
@@ -13699,8 +14171,23 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vmul2I(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (MulVI src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "mulv  $dst,$src1,$src2\t# vector (2S)" %}
+  ins_encode %{
+    __ mulv(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vmul4I(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (MulVI src1 src2));
   ins_cost(INSN_COST);
   format %{ "mulv  $dst,$src1,$src2\t# vector (4S)" %}
@@ -13712,8 +14199,23 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vmul2F(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (MulVF src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "fmul  $dst,$src1,$src2\t# vector (2S)" %}
+  ins_encode %{
+    __ fmul(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vmul4F(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (MulVF src1 src2));
   ins_cost(INSN_COST);
   format %{ "fmul  $dst,$src1,$src2\t# vector (4S)" %}
@@ -13727,6 +14229,7 @@
 
 instruct vmul2D(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (MulVD src1 src2));
   ins_cost(INSN_COST);
   format %{ "fmul  $dst,$src1,$src2\t# vector (2D)" %}
@@ -13740,8 +14243,23 @@
 
 // --------------------------------- DIV --------------------------------------
 
+instruct vdiv2F(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (DivVF src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "fdiv  $dst,$src1,$src2\t# vector (2S)" %}
+  ins_encode %{
+    __ fdiv(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vdiv4F(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (DivVF src1 src2));
   ins_cost(INSN_COST);
   format %{ "fdiv  $dst,$src1,$src2\t# vector (4S)" %}
@@ -13755,6 +14273,7 @@
 
 instruct vdiv2D(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (DivVD src1 src2));
   ins_cost(INSN_COST);
   format %{ "fdiv  $dst,$src1,$src2\t# vector (2D)" %}
@@ -13768,8 +14287,24 @@
 
 // --------------------------------- AND --------------------------------------
 
+instruct vand8B(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length_in_bytes() == 4 ||
+            n->as_Vector()->length_in_bytes() == 8);
+  match(Set dst (AndV src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
+  ins_encode %{
+    __ andr(as_FloatRegister($dst$$reg), __ T8B,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vand16B(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length_in_bytes() == 16);
   match(Set dst (AndV src1 src2));
   ins_cost(INSN_COST);
   format %{ "and  $dst,$src1,$src2\t# vector (16B)" %}
@@ -13783,8 +14318,24 @@
 
 // --------------------------------- OR ---------------------------------------
 
+instruct vor8B(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length_in_bytes() == 4 ||
+            n->as_Vector()->length_in_bytes() == 8);
+  match(Set dst (OrV src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "and  $dst,$src1,$src2\t# vector (8B)" %}
+  ins_encode %{
+    __ orr(as_FloatRegister($dst$$reg), __ T8B,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vor16B(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length_in_bytes() == 16);
   match(Set dst (OrV src1 src2));
   ins_cost(INSN_COST);
   format %{ "orr  $dst,$src1,$src2\t# vector (16B)" %}
@@ -13798,8 +14349,24 @@
 
 // --------------------------------- XOR --------------------------------------
 
+instruct vxor8B(vecD dst, vecD src1, vecD src2)
+%{
+  predicate(n->as_Vector()->length_in_bytes() == 4 ||
+            n->as_Vector()->length_in_bytes() == 8);
+  match(Set dst (XorV src1 src2));
+  ins_cost(INSN_COST);
+  format %{ "xor  $dst,$src1,$src2\t# vector (8B)" %}
+  ins_encode %{
+    __ eor(as_FloatRegister($dst$$reg), __ T8B,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vxor16B(vecX dst, vecX src1, vecX src2)
 %{
+  predicate(n->as_Vector()->length_in_bytes() == 16);
   match(Set dst (XorV src1 src2));
   ins_cost(INSN_COST);
   format %{ "xor  $dst,$src1,$src2\t# vector (16B)" %}
@@ -13833,7 +14400,23 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsll8B(vecD dst, vecD src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (LShiftVB src shift));
+  match(Set dst (RShiftVB src shift));
+  ins_cost(INSN_COST);
+  format %{ "sshl  $dst,$src,$shift\t# vector (8B)" %}
+  ins_encode %{
+    __ sshl(as_FloatRegister($dst$$reg), __ T8B,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($shift$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsll16B(vecX dst, vecX src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 16);
   match(Set dst (LShiftVB src shift));
   match(Set dst (RShiftVB src shift));
   ins_cost(INSN_COST);
@@ -13846,7 +14429,22 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsrl8B(vecD dst, vecD src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (URShiftVB src shift));
+  ins_cost(INSN_COST);
+  format %{ "ushl  $dst,$src,$shift\t# vector (8B)" %}
+  ins_encode %{
+    __ ushl(as_FloatRegister($dst$$reg), __ T8B,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($shift$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsrl16B(vecX dst, vecX src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 16);
   match(Set dst (URShiftVB src shift));
   ins_cost(INSN_COST);
   format %{ "ushl  $dst,$src,$shift\t# vector (16B)" %}
@@ -13858,7 +14456,28 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (LShiftVB src shift));
+  ins_cost(INSN_COST);
+  format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
+  ins_encode %{
+    int sh = (int)$shift$$constant & 31;
+    if (sh >= 8) {
+      __ eor(as_FloatRegister($dst$$reg), __ T8B,
+             as_FloatRegister($src$$reg),
+             as_FloatRegister($src$$reg));
+    } else {
+      __ shl(as_FloatRegister($dst$$reg), __ T8B,
+             as_FloatRegister($src$$reg), sh);
+    }
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 16);
   match(Set dst (LShiftVB src shift));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
@@ -13876,7 +14495,24 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (RShiftVB src shift));
+  ins_cost(INSN_COST);
+  format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
+  ins_encode %{
+    int sh = (int)$shift$$constant & 31;
+    if (sh >= 8) sh = 7;
+    sh = -sh & 7;
+    __ sshr(as_FloatRegister($dst$$reg), __ T8B,
+           as_FloatRegister($src$$reg), sh);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 16);
   match(Set dst (RShiftVB src shift));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
@@ -13890,7 +14526,28 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
+  predicate(n->as_Vector()->length() == 4 ||
+            n->as_Vector()->length() == 8);
+  match(Set dst (URShiftVB src shift));
+  ins_cost(INSN_COST);
+  format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
+  ins_encode %{
+    int sh = (int)$shift$$constant & 31;
+    if (sh >= 8) {
+      __ eor(as_FloatRegister($dst$$reg), __ T8B,
+             as_FloatRegister($src$$reg),
+             as_FloatRegister($src$$reg));
+    } else {
+      __ ushr(as_FloatRegister($dst$$reg), __ T8B,
+             as_FloatRegister($src$$reg), -sh & 7);
+    }
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
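The immediate-shift rules for byte vectors above (vsll8B_imm, vsra8B_imm, vsrl8B_imm) share one subtlety: the shift constant can be up to 31 after the `& 31` masking, but a byte lane only has 8 bits. A sketch (plain C++, not HotSpot code) of the per-lane semantics the masking and the >= 8 special cases implement:

```cpp
#include <cstdint>
#include <cassert>

// Logical left shift of one byte lane: a shift of 8 or more clears the
// lane entirely (hence the eor-to-zero fallback above).
static uint8_t lane_shl(uint8_t v, int con) {
  int sh = con & 31;
  return (sh >= 8) ? 0 : (uint8_t)(v << sh);
}

// Arithmetic right shift of one byte lane: a shift of 8 or more behaves
// like a shift by 7, leaving 0 or -1 depending on the sign bit.
static uint8_t lane_sshr(int8_t v, int con) {
  int sh = con & 31;
  if (sh >= 8) sh = 7;
  return (uint8_t)(v >> sh);
}

int main() {
  assert(lane_shl(0x81, 9) == 0x00);            // shifted out entirely
  assert(lane_sshr((int8_t)0x80, 20) == 0xff);  // sign bit replicated
  assert(lane_sshr((int8_t)0x7f, 20) == 0x00);
  return 0;
}
```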
 instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 16);
   match(Set dst (URShiftVB src shift));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
@@ -13908,7 +14565,23 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsll4S(vecD dst, vecD src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (LShiftVS src shift));
+  match(Set dst (RShiftVS src shift));
+  ins_cost(INSN_COST);
+  format %{ "sshl  $dst,$src,$shift\t# vector (4H)" %}
+  ins_encode %{
+    __ sshl(as_FloatRegister($dst$$reg), __ T4H,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($shift$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsll8S(vecX dst, vecX src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 8);
   match(Set dst (LShiftVS src shift));
   match(Set dst (RShiftVS src shift));
   ins_cost(INSN_COST);
@@ -13921,7 +14594,22 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsrl4S(vecD dst, vecD src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (URShiftVS src shift));
+  ins_cost(INSN_COST);
+  format %{ "ushl  $dst,$src,$shift\t# vector (4H)" %}
+  ins_encode %{
+    __ ushl(as_FloatRegister($dst$$reg), __ T4H,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($shift$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsrl8S(vecX dst, vecX src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 8);
   match(Set dst (URShiftVS src shift));
   ins_cost(INSN_COST);
   format %{ "ushl  $dst,$src,$shift\t# vector (8H)" %}
@@ -13933,7 +14621,28 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (LShiftVS src shift));
+  ins_cost(INSN_COST);
+  format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
+  ins_encode %{
+    int sh = (int)$shift$$constant & 31;
+    if (sh >= 16) {
+      __ eor(as_FloatRegister($dst$$reg), __ T8B,
+             as_FloatRegister($src$$reg),
+             as_FloatRegister($src$$reg));
+    } else {
+      __ shl(as_FloatRegister($dst$$reg), __ T4H,
+             as_FloatRegister($src$$reg), sh);
+    }
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 8);
   match(Set dst (LShiftVS src shift));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
@@ -13951,7 +14660,24 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (RShiftVS src shift));
+  ins_cost(INSN_COST);
+  format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
+  ins_encode %{
+    int sh = (int)$shift$$constant & 31;
+    if (sh >= 16) sh = 15;
+    sh = -sh & 15;
+    __ sshr(as_FloatRegister($dst$$reg), __ T4H,
+           as_FloatRegister($src$$reg), sh);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 8);
   match(Set dst (RShiftVS src shift));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
@@ -13965,7 +14691,28 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
+  predicate(n->as_Vector()->length() == 2 ||
+            n->as_Vector()->length() == 4);
+  match(Set dst (URShiftVS src shift));
+  ins_cost(INSN_COST);
+  format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
+  ins_encode %{
+    int sh = (int)$shift$$constant & 31;
+    if (sh >= 16) {
+      __ eor(as_FloatRegister($dst$$reg), __ T8B,
+             as_FloatRegister($src$$reg),
+             as_FloatRegister($src$$reg));
+    } else {
+      __ ushr(as_FloatRegister($dst$$reg), __ T4H,
+             as_FloatRegister($src$$reg), -sh & 15);
+    }
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 8);
   match(Set dst (URShiftVS src shift));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
@@ -13983,7 +14730,22 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsll2I(vecD dst, vecD src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (LShiftVI src shift));
+  match(Set dst (RShiftVI src shift));
+  ins_cost(INSN_COST);
+  format %{ "sshl  $dst,$src,$shift\t# vector (2S)" %}
+  ins_encode %{
+    __ sshl(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($shift$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsll4I(vecX dst, vecX src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (LShiftVI src shift));
   match(Set dst (RShiftVI src shift));
   ins_cost(INSN_COST);
@@ -13996,7 +14758,21 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsrl2I(vecD dst, vecD src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (URShiftVI src shift));
+  ins_cost(INSN_COST);
+  format %{ "ushl  $dst,$src,$shift\t# vector (2S)" %}
+  ins_encode %{
+    __ ushl(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src$$reg),
+            as_FloatRegister($shift$$reg));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsrl4I(vecX dst, vecX src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (URShiftVI src shift));
   ins_cost(INSN_COST);
   format %{ "ushl  $dst,$src,$shift\t# vector (4S)" %}
@@ -14008,7 +14784,21 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (LShiftVI src shift));
+  ins_cost(INSN_COST);
+  format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
+  ins_encode %{
+    __ shl(as_FloatRegister($dst$$reg), __ T2S,
+           as_FloatRegister($src$$reg),
+           (int)$shift$$constant & 31);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (LShiftVI src shift));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
@@ -14020,7 +14810,21 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (RShiftVI src shift));
+  ins_cost(INSN_COST);
+  format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
+  ins_encode %{
+    __ sshr(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src$$reg),
+            -(int)$shift$$constant & 31);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (RShiftVI src shift));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
@@ -14032,7 +14836,21 @@
   ins_pipe(pipe_class_default);
 %}
 
+instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (URShiftVI src shift));
+  ins_cost(INSN_COST);
+  format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
+  ins_encode %{
+    __ ushr(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src$$reg),
+            -(int)$shift$$constant & 31);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 4);
   match(Set dst (URShiftVI src shift));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
@@ -14045,6 +14863,7 @@
 %}
 
 instruct vsll2L(vecX dst, vecX src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (LShiftVL src shift));
   match(Set dst (RShiftVL src shift));
   ins_cost(INSN_COST);
@@ -14058,6 +14877,7 @@
 %}
 
 instruct vsrl2L(vecX dst, vecX src, vecX shift) %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (URShiftVL src shift));
   ins_cost(INSN_COST);
   format %{ "ushl  $dst,$src,$shift\t# vector (2D)" %}
@@ -14070,6 +14890,7 @@
 %}
 
 instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (LShiftVL src shift));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
@@ -14082,6 +14903,7 @@
 %}
 
 instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (RShiftVL src shift));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
@@ -14094,6 +14916,7 @@
 %}
 
 instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
+  predicate(n->as_Vector()->length() == 2);
   match(Set dst (URShiftVL src shift));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
--- a/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -491,6 +491,11 @@
         i->rf(_index, 16);
         i->f(_ext.option(), 15, 13);
         unsigned size = i->get(31, 30);
+        if (i->get(26, 26) && i->get(23, 23)) {
+          // SIMD Q Type - Size = 128 bits
+          assert(size == 0, "bad size");
+          size = 0b100;
+        }
         if (size == 0) // It's a byte
           i->f(_ext.shift() >= 0, 12);
         else {
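
The hunk above widens the size decode for SIMD memory accesses: a Q-sized (128-bit) access keeps bits 31:30 at zero and is instead flagged by bits 26 and 23. A self-contained sketch (not the assembler itself) of that decode:

```cpp
#include <cstdint>
#include <cassert>

static unsigned bits(uint32_t insn, int hi, int lo) {
  return (insn >> lo) & ((1u << (hi - lo + 1)) - 1);
}

// log2 of the access size in bytes, per the decode above: normally
// bits 31:30, but a SIMD Q access (bit 26 and bit 23 set, size == 00)
// really moves 16 bytes, i.e. log2(size) = 4 = 0b100.
static unsigned ldst_log2_size(uint32_t insn) {
  unsigned size = bits(insn, 31, 30);
  if (bits(insn, 26, 26) && bits(insn, 23, 23)) {
    assert(size == 0);
    size = 0b100;
  }
  return size;
}

int main() {
  assert(ldst_log2_size((1u << 26) | (1u << 23)) == 4);  // SIMD Q form
  assert(ldst_log2_size(3u << 30) == 3);                 // 64-bit GPR form
  return 0;
}
```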
@@ -2050,6 +2055,9 @@
   INSN(negr,  1, 0b100000101110);
   INSN(notr,  1, 0b100000010110);
   INSN(addv,  0, 0b110001101110);
+  INSN(cls,   0, 0b100000010010);
+  INSN(clz,   1, 0b100000010010);
+  INSN(cnt,   0, 0b100000010110);
 
 #undef INSN
 
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1408,6 +1408,52 @@
   movk(r, imm64 & 0xffff, 32);
 }
 
+// Macro to mov replicated immediate to vector register.
+//  Vd will get the following values for different arrangements in T
+//   imm32 == hex 000000gh  T8B:  Vd = ghghghghghghghgh
+//   imm32 == hex 000000gh  T16B: Vd = ghghghghghghghghghghghghghghghgh
+//   imm32 == hex 0000efgh  T4H:  Vd = efghefghefghefgh
+//   imm32 == hex 0000efgh  T8H:  Vd = efghefghefghefghefghefghefghefgh
+//   imm32 == hex abcdefgh  T2S:  Vd = abcdefghabcdefgh
+//   imm32 == hex abcdefgh  T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
+//   T1D/T2D: invalid
+void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
+  assert(T != T1D && T != T2D, "invalid arrangement");
+  if (T == T8B || T == T16B) {
+    assert((imm32 & ~0xff) == 0, "extraneous bits in unsigned imm32 (T8B/T16B)");
+    movi(Vd, T, imm32 & 0xff, 0);
+    return;
+  }
+  u_int32_t nimm32 = ~imm32;
+  if (T == T4H || T == T8H) {
+    assert((imm32  & ~0xffff) == 0, "extraneous bits in unsigned imm32 (T4H/T8H)");
+    imm32 &= 0xffff;
+    nimm32 &= 0xffff;
+  }
+  u_int32_t x = imm32;
+  int movi_cnt = 0;
+  int movn_cnt = 0;
+  while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
+  x = nimm32;
+  while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
+  if (movn_cnt < movi_cnt) imm32 = nimm32;
+  unsigned lsl = 0;
+  while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
+  if (movn_cnt < movi_cnt)
+    mvni(Vd, T, imm32 & 0xff, lsl);
+  else
+    movi(Vd, T, imm32 & 0xff, lsl);
+  imm32 >>= 8; lsl += 8;
+  while (imm32) {
+    while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
+    if (movn_cnt < movi_cnt)
+      bici(Vd, T, imm32 & 0xff, lsl);
+    else
+      orri(Vd, T, imm32 & 0xff, lsl);
+    lsl += 8; imm32 >>= 8;
+  }
+}
+
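The strategy above picks whichever of imm32 and ~imm32 has fewer non-zero bytes: one movi (or mvni for the complement) on the lowest non-zero byte, then one orri (or bici) per remaining non-zero byte. A standalone sketch (plain C++, not HotSpot code) of the instruction-count comparison:

```cpp
#include <cstdint>
#include <cassert>

// Number of non-zero bytes in x: building a replicated constant with n
// non-zero bytes takes one movi/mvni plus (n - 1) orri/bici instructions.
static int nonzero_bytes(uint32_t x) {
  int n = 0;
  while (x) {
    if (x & 0xff) n++;
    x >>= 8;
  }
  return n;
}

int main() {
  // 0xffffff01 has four non-zero bytes, but its complement 0x000000fe has
  // only one, so the macro above emits a single mvni rather than a movi
  // followed by three orris.
  uint32_t imm32 = 0xffffff01u;
  assert(nonzero_bytes(imm32)  == 4);
  assert(nonzero_bytes(~imm32) == 1);
  return 0;
}
```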
 void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
 {
 #ifndef PRODUCT
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -36,6 +36,7 @@
 class MacroAssembler: public Assembler {
   friend class LIR_Assembler;
 
+ public:
   using Assembler::mov;
   using Assembler::movi;
 
@@ -465,44 +466,7 @@
 
   void movptr(Register r, uintptr_t imm64);
 
-  // Macro to mov replicated immediate to vector register.
-  // Where imm32 == hex abcdefgh, Vd will get the following values
-  // for different arrangements in T
-  //   T8B:  Vd = ghghghghghghghgh
-  //   T16B: Vd = ghghghghghghghghghghghghghghghgh
-  //   T4H:  Vd = efghefghefghefgh
-  //   T8H:  Vd = efghefghefghefghefghefghefghefgh
-  //   T2S:  Vd = abcdefghabcdefgh
-  //   T4S:  Vd = abcdefghabcdefghabcdefghabcdefgh
-  //   T1D/T2D: invalid
-  void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
-    assert(T != T1D && T != T2D, "invalid arrangement");
-    u_int32_t nimm32 = ~imm32;
-    if (T == T8B || T == T16B) { imm32 &= 0xff; nimm32 &= 0xff; }
-    if (T == T4H || T == T8H) { imm32 &= 0xffff; nimm32 &= 0xffff; }
-    u_int32_t x = imm32;
-    int movi_cnt = 0;
-    int movn_cnt = 0;
-    while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
-    x = nimm32;
-    while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
-    if (movn_cnt < movi_cnt) imm32 = nimm32;
-    unsigned lsl = 0;
-    while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
-    if (movn_cnt < movi_cnt)
-      mvni(Vd, T, imm32 & 0xff, lsl);
-    else
-      movi(Vd, T, imm32 & 0xff, lsl);
-    imm32 >>= 8; lsl += 8;
-    while (imm32) {
-      while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
-      if (movn_cnt < movi_cnt)
-        bici(Vd, T, imm32 & 0xff, lsl);
-      else
-        orri(Vd, T, imm32 & 0xff, lsl);
-      lsl += 8; imm32 >>= 8;
-    }
-  }
+  void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32);
 
   // macro instructions for accessing and updating floating point
   // status register
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2120,6 +2120,7 @@
       save_native_result(masm, ret_type, stack_slots);
     }
 
+    __ mov(c_rarg2, rthread);
     __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
     __ mov(c_rarg0, obj_reg);
 
@@ -2128,7 +2129,7 @@
     __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
     __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));
 
-    rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), 2, 0, 1);
+    rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), 3, 0, 1);
 
 #ifdef ASSERT
     {
--- a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -190,10 +190,21 @@
     }
   }
 
+  if (UseGHASHIntrinsics) {
+    warning("GHASH intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
+  }
+
   if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
     UseCRC32Intrinsics = true;
   }
 
+  if (UseCRC32CIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
+      warning("CRC32C intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
+  }
+
   if (auxv & (HWCAP_SHA1 | HWCAP_SHA2)) {
     if (FLAG_IS_DEFAULT(UseSHA)) {
       FLAG_SET_DEFAULT(UseSHA, true);
@@ -246,6 +257,10 @@
     UseBarriersForVolatile = (_cpuFeatures & CPU_DMB_ATOMICS) != 0;
   }
 
+  if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
+    UsePopCountInstruction = true;
+  }
+
 #ifdef COMPILER2
   if (FLAG_IS_DEFAULT(OptoScheduling)) {
     OptoScheduling = true;
--- a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -2475,7 +2475,8 @@
 
     // Slow case of monitor enter.
     // Inline a special case of call_VM that disallows any pending_exception.
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box);
+    // Arguments are (oop obj, BasicLock* lock, JavaThread* thread).
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);
 
     __ asm_assert_mem8_is_zero(thread_(pending_exception),
        "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0);
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -176,6 +176,11 @@
     FLAG_SET_DEFAULT(UseAESIntrinsics, false);
   }
 
+  if (UseGHASHIntrinsics) {
+    warning("GHASH intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
+  }
+
   if (UseSHA) {
     warning("SHA instructions are not available on this CPU");
     FLAG_SET_DEFAULT(UseSHA, false);
@@ -186,6 +191,13 @@
     FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
   }
+
+  if (UseCRC32CIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
+      warning("CRC32C intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
+  }
+
   // Adjust RTM (Restricted Transactional Memory) flags.
   if (!has_tcheck() && UseRTMLocking) {
     // Can't continue because UseRTMLocking affects UseBiasedLocking flag
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -128,7 +128,11 @@
     faligndata_op3 = 0x36,
     flog3_op3    = 0x36,
     edge_op3     = 0x36,
+    fzero_op3    = 0x36,
     fsrc_op3     = 0x36,
+    fnot_op3     = 0x36,
+    xmulx_op3    = 0x36,
+    crc32c_op3   = 0x36,
     impdep2_op3  = 0x37,
     stpartialf_op3 = 0x37,
     jmpl_op3     = 0x38,
@@ -220,6 +224,8 @@
     mdtox_opf          = 0x110,
     mstouw_opf         = 0x111,
     mstosw_opf         = 0x113,
+    xmulx_opf          = 0x115,
+    xmulxhi_opf        = 0x116,
     mxtod_opf          = 0x118,
     mwtos_opf          = 0x119,
 
@@ -228,7 +234,9 @@
 
     sha1_opf           = 0x141,
     sha256_opf         = 0x142,
-    sha512_opf         = 0x143
+    sha512_opf         = 0x143,
+
+    crc32c_opf         = 0x147
   };
 
   enum op5s {
@@ -597,6 +605,11 @@
     return x & ((1 << 10) - 1);
   }
 
+  // create a low12 __value__ (not a field) for a given 32-bit constant
+  static int low12( int x ) {
+    return x & ((1 << 12) - 1);
+  }
+
   // AES crypto instructions supported only on certain processors
   static void aes_only() { assert( VM_Version::has_aes(), "This instruction only works on SPARC with AES instructions support"); }
 
@@ -605,6 +618,9 @@
   static void sha256_only() { assert( VM_Version::has_sha256(), "This instruction only works on SPARC with SHA256"); }
   static void sha512_only() { assert( VM_Version::has_sha512(), "This instruction only works on SPARC with SHA512"); }
 
+  // CRC32C instruction supported only on certain processors
+  static void crc32c_only() { assert( VM_Version::has_crc32c(), "This instruction only works on SPARC with CRC32C"); }
+
   // instruction only in VIS1
   static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); }
 
@@ -1019,6 +1035,7 @@
 
   void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
 
+  void sw_count() { emit_int32( op(branch_op) | op2(sethi_op2) | 0x3f0 ); }
 
   // pp 202
 
@@ -1195,8 +1212,14 @@
 
   void faligndata( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(faligndata_op3) | fs1(s1, FloatRegisterImpl::D) | opf(faligndata_opf) | fs2(s2, FloatRegisterImpl::D)); }
 
+  void fzero( FloatRegisterImpl::Width w, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fzero_op3) | opf(0x62 - w)); }
+
   void fsrc2( FloatRegisterImpl::Width w, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fsrc_op3) | opf(0x7A - w) | fs2(s2, w)); }
 
+  void fnot1( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(fnot_op3) | fs1(s1, w) | opf(0x6C - w)); }
+
+  void fpmerge( FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(0x36) | fs1(s1, FloatRegisterImpl::S) | opf(0x4b) | fs2(s2, FloatRegisterImpl::S)); }
+
   void stpartialf( Register s1, Register s2, FloatRegister d, int ia = -1 ) { vis1_only(); emit_int32( op(ldst_op) | fd(d, FloatRegisterImpl::D) | op3(stpartialf_op3) | rs1(s1) | imm_asi(ia) | rs2(s2)); }
 
   //  VIS2 instructions
@@ -1212,12 +1235,19 @@
   void movwtos( Register s, FloatRegister d ) { vis3_only();  emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); }
   void movxtod( Register s, FloatRegister d ) { vis3_only();  emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); }
 
+  void xmulx(Register s1, Register s2, Register d) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulx_opf) | rs2(s2)); }
+  void xmulxhi(Register s1, Register s2, Register d) { vis3_only(); emit_int32( op(arith_op) | rd(d) | op3(xmulx_op3) | rs1(s1) | opf(xmulxhi_opf) | rs2(s2)); }
+
   // Crypto SHA instructions
 
   void sha1()   { sha1_only();    emit_int32( op(arith_op) | op3(sha_op3) | opf(sha1_opf)); }
   void sha256() { sha256_only();  emit_int32( op(arith_op) | op3(sha_op3) | opf(sha256_opf)); }
   void sha512() { sha512_only();  emit_int32( op(arith_op) | op3(sha_op3) | opf(sha512_opf)); }
 
+  // CRC32C instruction
+
+  void crc32c( FloatRegister s1, FloatRegister s2, FloatRegister d ) { crc32c_only();  emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(crc32c_op3) | fs1(s1, FloatRegisterImpl::D) | opf(crc32c_opf) | fs2(s2, FloatRegisterImpl::D)); }
+
   // Creation
   Assembler(CodeBuffer* code) : AbstractAssembler(code) {
 #ifdef CHECK_DELAY
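
xmulx and xmulxhi are the VIS3 carry-less (XOR) multiply instructions that the
GHASH stub further down builds on: they produce the low and high 64 bits of the
128-bit product of two 64-bit operands, with partial products combined by XOR
instead of addition. A bit-serial C++ reference model (a sketch for
illustration only, not part of this change):

    #include <cstdint>

    // carry-less 64x64 -> 128-bit multiply; lo mirrors xmulx, hi mirrors xmulxhi
    static void xmul64(uint64_t a, uint64_t b, uint64_t& lo, uint64_t& hi) {
      lo = 0; hi = 0;
      for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
          lo ^= a << i;                      // bits that land in the low word
          if (i != 0) hi ^= a >> (64 - i);   // bits shifted past bit 63
        }
      }
    }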
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -956,6 +956,7 @@
 
   int hi = (int)(value >> 32);
   int lo = (int)(value & ~0);
+  int bits_33to2 = (int)((value >> 2) & ~0);
   // (Matcher::isSimpleConstant64 knows about the following optimizations.)
   if (Assembler::is_simm13(lo) && value == lo) {
     or3(G0, lo, d);
@@ -964,6 +965,12 @@
     if (low10(lo) != 0)
       or3(d, low10(lo), d);
   }
+  else if ((hi >> 2) == 0) {
+    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
+    sllx(d, 2, d);
+    if (low12(lo) != 0)
+      or3(d, low12(lo), d);
+  }
   else if (hi == -1) {
     Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
     xor3(d, low10(lo) ^ ~low10(~0), d);
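
The new branch above handles constants that fit in 34 bits (the (hi >> 2) == 0
guard) in three instructions: sethi materializes bits 33..12 of the value
pre-shifted right by two, sllx moves them into place, and or3 fills in the low
12 bits, which is what the new low12 helper in assembler_sparc.hpp exists for.
A C++ model of what the emitted sequence computes (a sketch paraphrasing the
code above, assuming value < 2^34):

    #include <cstdint>

    // models: sethi(bits_33to2, d); sllx(d, 2, d); or3(d, low12(lo), d)
    static uint64_t set64_34bit(uint64_t value) {
      uint64_t d = (value >> 2) & 0xFFFFFC00u;  // sethi keeps the top 22 bits
      d <<= 2;                                  // bits 33..12 of value, in place
      d |= value & 0xFFF;                       // low12 supplies bits 11..0
      return d;                                 // equals value
    }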
@@ -4351,3 +4358,52 @@
   cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
   nop(); // Separate short branches
 }
+
+/**
+ * Update CRC-32[C] with a byte value according to constants in table
+ *
+ * @param [in,out]crc   Register containing the crc.
+ * @param [in]val       Register containing the byte to fold into the CRC.
+ * @param [in]table     Register containing the table of crc constants.
+ *
+ * uint32_t crc;
+ * val = crc_table[(val ^ crc) & 0xFF];
+ * crc = val ^ (crc >> 8);
+ */
+void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
+  xor3(val, crc, val);
+  and3(val, 0xFF, val);
+  sllx(val, 2, val);
+  lduw(table, val, val);
+  srlx(crc, 8, crc);
+  xor3(val, crc, crc);
+}
+
+// Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros
+void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) {
+  srlx(src, 24, dst);
+
+  sllx(src, 32+8, tmp);
+  srlx(tmp, 32+24, tmp);
+  sllx(tmp, 8, tmp);
+  or3(dst, tmp, dst);
+
+  sllx(src, 32+16, tmp);
+  srlx(tmp, 32+24, tmp);
+  sllx(tmp, 16, tmp);
+  or3(dst, tmp, dst);
+
+  sllx(src, 32+24, tmp);
+  srlx(tmp, 32, tmp);
+  or3(dst, tmp, dst);
+}
+
+void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) {
+  reverse_bytes_32(src, tmp1, tmp2);
+  movxtod(tmp1, dst);
+}
+
+void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) {
+  movdtox(src, tmp1);
+  reverse_bytes_32(tmp1, dst, tmp2);
+}
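
These helpers are the scalar building blocks of the CRC32C stub: one is a
single step of the classic table-driven CRC, the other byte-swaps the low word
on its way between the integer and FP register files. Equivalent C++ (a sketch
of the semantics, not the emitted code):

    #include <cstdint>

    // one byte of table-driven CRC-32[C], as in update_byte_crc32 above
    static inline uint32_t crc32_byte(uint32_t crc, uint8_t b,
                                      const uint32_t table[256]) {
      return table[(crc ^ b) & 0xFF] ^ (crc >> 8);
    }

    // byte swap of the low 32 bits, assuming the upper 32 bits are zero
    static inline uint64_t rev_bytes_32(uint64_t src) {
      return ((src >> 24) & 0x000000FFu) |
             ((src >>  8) & 0x0000FF00u) |
             ((src <<  8) & 0x00FF0000u) |
             ((src << 24) & 0xFF000000u);
    }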
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -903,6 +903,10 @@
   inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
   inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
 
+  // little-endian
+  inline void ldxl(Register s1, Register s2, Register d) { ldxa(s1, s2, ASI_PRIMARY_LITTLE, d); }
+  inline void ldfl(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { ldfa(w, s1, s2, ASI_PRIMARY_LITTLE, d); }
+
   // membar pseudo instruction.  Takes the target memory model into account.
   inline void membar( Assembler::Membar_mask_bits const7a );
 
@@ -1436,6 +1440,14 @@
   // Use BIS for zeroing
   void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);
 
+  // Update CRC-32[C] with a byte value according to constants in table
+  void update_byte_crc32(Register crc, Register val, Register table);
+
+  // Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros
+  void reverse_bytes_32(Register src, Register dst, Register tmp);
+  void movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2);
+  void movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2);
+
 #undef VIRTUAL
 };
 
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -4786,6 +4786,330 @@
     return start;
   }
 
+  /* Single and multi-block ghash operations */
+  address generate_ghash_processBlocks() {
+      __ align(CodeEntryAlignment);
+      Label L_ghash_loop, L_aligned, L_main;
+      StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
+      address start = __ pc();
+
+      Register state = I0;
+      Register subkeyH = I1;
+      Register data = I2;
+      Register len = I3;
+
+      __ save_frame(0);
+
+      __ ldx(state, 0, O0);
+      __ ldx(state, 8, O1);
+
+      // Loop label for multiblock operations
+      __ BIND(L_ghash_loop);
+
+      // Check if 'data' is unaligned
+      __ andcc(data, 7, G1);
+      __ br(Assembler::zero, false, Assembler::pt, L_aligned);
+      __ delayed()->nop();
+
+      Register left_shift = L1;
+      Register right_shift = L2;
+      Register data_ptr = L3;
+
+      // Get left and right shift values in bits
+      __ sll(G1, LogBitsPerByte, left_shift);
+      __ mov(64, right_shift);
+      __ sub(right_shift, left_shift, right_shift);
+
+      // Align to read 'data'
+      __ sub(data, G1, data_ptr);
+
+      // Load first 8 bytes of 'data'
+      __ ldx(data_ptr, 0, O4);
+      __ sllx(O4, left_shift, O4);
+      __ ldx(data_ptr, 8, O5);
+      __ srlx(O5, right_shift, G4);
+      __ bset(G4, O4);
+
+      // Load second 8 bytes of 'data'
+      __ sllx(O5, left_shift, O5);
+      __ ldx(data_ptr, 16, G4);
+      __ srlx(G4, right_shift, G4);
+      __ ba(L_main);
+      __ delayed()->bset(G4, O5);
+
+      // If 'data' is aligned, load normally
+      __ BIND(L_aligned);
+      __ ldx(data, 0, O4);
+      __ ldx(data, 8, O5);
+
+      __ BIND(L_main);
+      __ ldx(subkeyH, 0, O2);
+      __ ldx(subkeyH, 8, O3);
+
+      __ xor3(O0, O4, O0);
+      __ xor3(O1, O5, O1);
+
+      __ xmulxhi(O0, O3, G3);
+      __ xmulx(O0, O2, O5);
+      __ xmulxhi(O1, O2, G4);
+      __ xmulxhi(O1, O3, G5);
+      __ xmulx(O0, O3, G1);
+      __ xmulx(O1, O3, G2);
+      __ xmulx(O1, O2, O3);
+      __ xmulxhi(O0, O2, O4);
+
+      __ mov(0xE1, O0);
+      __ sllx(O0, 56, O0);
+
+      __ xor3(O5, G3, O5);
+      __ xor3(O5, G4, O5);
+      __ xor3(G5, G1, G1);
+      __ xor3(G1, O3, G1);
+      __ srlx(G2, 63, O1);
+      __ srlx(G1, 63, G3);
+      __ sllx(G2, 63, O3);
+      __ sllx(G2, 58, O2);
+      __ xor3(O3, O2, O2);
+
+      __ sllx(G1, 1, G1);
+      __ or3(G1, O1, G1);
+
+      __ xor3(G1, O2, G1);
+
+      __ sllx(G2, 1, G2);
+
+      __ xmulxhi(G1, O0, O1);
+      __ xmulx(G1, O0, O2);
+      __ xmulxhi(G2, O0, O3);
+      __ xmulx(G2, O0, G1);
+
+      __ xor3(O4, O1, O4);
+      __ xor3(O5, O2, O5);
+      __ xor3(O5, O3, O5);
+
+      __ sllx(O4, 1, O2);
+      __ srlx(O5, 63, O3);
+
+      __ or3(O2, O3, O0);
+
+      __ sllx(O5, 1, O1);
+      __ srlx(G1, 63, O2);
+      __ or3(O1, O2, O1);
+      __ xor3(O1, G3, O1);
+
+      __ deccc(len);
+      __ br(Assembler::notZero, true, Assembler::pt, L_ghash_loop);
+      __ delayed()->add(data, 16, data);
+
+      __ stx(O0, I0, 0);
+      __ stx(O1, I0, 8);
+
+      __ ret();
+      __ delayed()->restore();
+
+      return start;
+  }
+
+#define CHUNK_LEN   128          /* 128 x 8B = 1KB */
+#define CHUNK_K1    0x1307a0206  /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */
+#define CHUNK_K2    0x1a0f717c4  /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */
+#define CHUNK_K3    0x0170076fa  /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */
+
+  /**
+   *  Arguments:
+   *
+   * Inputs:
+   *   O0   - int   crc
+   *   O1   - byte* buf
+   *   O2   - int   len
+   *   O3   - int*  table
+   *
+   * Output:
+   *   O0   - int crc result
+   */
+  address generate_updateBytesCRC32C() {
+    assert(UseCRC32CIntrinsics, "need CRC32C instruction");
+
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
+    address start = __ pc();
+
+    const Register crc   = O0;  // crc
+    const Register buf   = O1;  // source java byte array address
+    const Register len   = O2;  // number of bytes
+    const Register table = O3;  // byteTable
+
+    Label L_crc32c_head, L_crc32c_aligned;
+    Label L_crc32c_parallel, L_crc32c_parallel_loop;
+    Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop;
+    Label L_crc32c_done, L_crc32c_tail, L_crc32c_return;
+
+    __ cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return);
+
+    // clear upper 32 bits of crc
+    __ clruwu(crc);
+
+    __ and3(buf, 7, G4);
+    __ cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, L_crc32c_aligned);
+
+    __ mov(8, G1);
+    __ sub(G1, G4, G4);
+
+    // ------ process the misaligned head (7 bytes or less) ------
+    __ BIND(L_crc32c_head);
+
+    // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
+    __ ldub(buf, 0, G1);
+    __ update_byte_crc32(crc, G1, table);
+
+    __ inc(buf);
+    __ dec(len);
+    __ cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return);
+    __ dec(G4);
+    __ cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head);
+
+    // ------ process the 8-byte-aligned body ------
+    __ BIND(L_crc32c_aligned);
+    __ nop();
+    __ cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail);
+
+    // reverse the byte order of lower 32 bits to big endian, and move to FP side
+    __ movitof_revbytes(crc, F0, G1, G3);
+
+    __ set(CHUNK_LEN*8*4, G4);
+    __ cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial);
+
+    // ------ process four 1KB chunks in parallel ------
+    __ BIND(L_crc32c_parallel);
+
+    __ fzero(FloatRegisterImpl::D, F2);
+    __ fzero(FloatRegisterImpl::D, F4);
+    __ fzero(FloatRegisterImpl::D, F6);
+
+    __ mov(CHUNK_LEN - 1, G4);
+    __ BIND(L_crc32c_parallel_loop);
+    // schedule ldf's ahead of crc32c's to hide the load-use latency
+    __ ldf(FloatRegisterImpl::D, buf, 0,            F8);
+    __ ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
+    __ ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
+    __ ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14);
+    __ crc32c(F0, F8,  F0);
+    __ crc32c(F2, F10, F2);
+    __ crc32c(F4, F12, F4);
+    __ crc32c(F6, F14, F6);
+    __ inc(buf, 8);
+    __ dec(G4);
+    __ cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop);
+
+    __ ldf(FloatRegisterImpl::D, buf, 0,            F8);
+    __ ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8,  F10);
+    __ ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12);
+    __ crc32c(F0, F8,  F0);
+    __ crc32c(F2, F10, F2);
+    __ crc32c(F4, F12, F4);
+
+    __ inc(buf, CHUNK_LEN*24);
+    __ ldfl(FloatRegisterImpl::D, buf, G0, F14);  // load in little endian
+    __ inc(buf, 8);
+
+    __ prefetch(buf, 0,            Assembler::severalReads);
+    __ prefetch(buf, CHUNK_LEN*8,  Assembler::severalReads);
+    __ prefetch(buf, CHUNK_LEN*16, Assembler::severalReads);
+    __ prefetch(buf, CHUNK_LEN*24, Assembler::severalReads);
+
+    // move to INT side, and reverse the byte order of lower 32 bits to little endian
+    __ movftoi_revbytes(F0, O4, G1, G4);
+    __ movftoi_revbytes(F2, O5, G1, G4);
+    __ movftoi_revbytes(F4, G5, G1, G4);
+
+    // combine the results of 4 chunks
+    __ set64(CHUNK_K1, G3, G1);
+    __ xmulx(O4, G3, O4);
+    __ set64(CHUNK_K2, G3, G1);
+    __ xmulx(O5, G3, O5);
+    __ set64(CHUNK_K3, G3, G1);
+    __ xmulx(G5, G3, G5);
+
+    __ movdtox(F14, G4);
+    __ xor3(O4, O5, O5);
+    __ xor3(G5, O5, O5);
+    __ xor3(G4, O5, O5);
+
+    // reverse the byte order to big endian, via stack, and move to FP side
+    __ add(SP, -8, G1);
+    __ srlx(G1, 3, G1);
+    __ sllx(G1, 3, G1);
+    __ stx(O5, G1, G0);
+    __ ldfl(FloatRegisterImpl::D, G1, G0, F2);  // load in little endian
+
+    __ crc32c(F6, F2, F0);
+
+    __ set(CHUNK_LEN*8*4, G4);
+    __ sub(len, G4, len);
+    __ cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel);
+    __ nop();
+    __ cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done);
+
+    __ BIND(L_crc32c_serial);
+
+    __ mov(32, G4);
+    __ cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8);
+
+    // ------ process 32B chunks ------
+    __ BIND(L_crc32c_x32_loop);
+    __ ldf(FloatRegisterImpl::D, buf, 0, F2);
+    __ inc(buf, 8);
+    __ crc32c(F0, F2, F0);
+    __ ldf(FloatRegisterImpl::D, buf, 0, F2);
+    __ inc(buf, 8);
+    __ crc32c(F0, F2, F0);
+    __ ldf(FloatRegisterImpl::D, buf, 0, F2);
+    __ inc(buf, 8);
+    __ crc32c(F0, F2, F0);
+    __ ldf(FloatRegisterImpl::D, buf, 0, F2);
+    __ inc(buf, 8);
+    __ crc32c(F0, F2, F0);
+    __ dec(len, 32);
+    __ cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop);
+
+    __ BIND(L_crc32c_x8);
+    __ nop();
+    __ cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done);
+
+    // ------ process 8B chunks ------
+    __ BIND(L_crc32c_x8_loop);
+    __ ldf(FloatRegisterImpl::D, buf, 0, F2);
+    __ inc(buf, 8);
+    __ crc32c(F0, F2, F0);
+    __ dec(len, 8);
+    __ cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop);
+
+    __ BIND(L_crc32c_done);
+
+    // move to INT side, and reverse the byte order of lower 32 bits to little endian
+    __ movftoi_revbytes(F0, crc, G1, G3);
+
+    __ cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return);
+
+    // ------ process the misaligned tail (7 bytes or less) ------
+    __ BIND(L_crc32c_tail);
+
+    // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
+    __ ldub(buf, 0, G1);
+    __ update_byte_crc32(crc, G1, table);
+
+    __ inc(buf);
+    __ dec(len);
+    __ cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail);
+
+    __ BIND(L_crc32c_return);
+    __ nop();
+    __ retl();
+    __ delayed()->nop();
+
+    return start;
+  }
+
   void generate_initial() {
     // Generates all stubs and initializes the entry points
 
@@ -4859,6 +5183,10 @@
       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
     }
+    // generate GHASH intrinsics code
+    if (UseGHASHIntrinsics) {
+      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
+    }
 
     // generate SHA1/SHA256/SHA512 intrinsics code
     if (UseSHA1Intrinsics) {
@@ -4873,6 +5201,11 @@
       StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
       StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true,  "sha512_implCompressMB");
     }
+
+    // generate CRC32C intrinsic code
+    if (UseCRC32CIntrinsics) {
+      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
+    }
   }
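
The CRC32C stub above follows a head/body/tail shape: a byte-at-a-time loop
brings buf up to 8-byte alignment, the aligned body is consumed eight bytes per
crc32c instruction (with a four-streams-in-parallel variant for buffers of at
least four 1KB chunks, recombined via xmulx and the CHUNK_K constants), and a
final byte loop handles the remainder. The control skeleton in C++, with the
parallel recombination elided (a sketch; crc32c_u64 and crc32c_byte are
hypothetical stand-ins for the hardware instruction and the table-driven step):

    #include <cstdint>
    #include <cstddef>

    extern uint32_t crc32c_u64(uint32_t crc, uint64_t data);  // hypothetical
    extern uint32_t crc32c_byte(uint32_t crc, uint8_t b);     // hypothetical

    uint32_t update_bytes_crc32c(uint32_t crc, const uint8_t* buf, size_t len) {
      while (len > 0 && (reinterpret_cast<uintptr_t>(buf) & 7) != 0) {
        crc = crc32c_byte(crc, *buf++); len--;          // misaligned head
      }
      while (len >= 8) {                                // 8-byte-aligned body
        crc = crc32c_u64(crc, *reinterpret_cast<const uint64_t*>(buf));
        buf += 8; len -= 8;
      }
      while (len > 0) {
        crc = crc32c_byte(crc, *buf++); len--;          // misaligned tail
      }
      return crc;
    }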
 
 
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
 enum /* platform_dependent_constants */ {
   // %%%%%%%% May be able to shrink this a lot
   code_size1 = 20000,           // simply increase if too small (assembler will crash if too small)
-  code_size2 = 23000            // simply increase if too small (assembler will crash if too small)
+  code_size2 = 24000            // simply increase if too small (assembler will crash if too small)
 };
 
 class Sparc {
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -230,7 +230,7 @@
   assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
 
   char buf[512];
-  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
                (has_hardware_popc() ? ", popc" : ""),
                (has_vis1() ? ", vis1" : ""),
@@ -242,6 +242,7 @@
                (has_sha1() ? ", sha1" : ""),
                (has_sha256() ? ", sha256" : ""),
                (has_sha512() ? ", sha512" : ""),
+               (has_crc32c() ? ", crc32c" : ""),
                (is_ultra3() ? ", ultra3" : ""),
                (is_sun4v() ? ", sun4v" : ""),
                (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
@@ -300,6 +301,17 @@
     }
   }
 
+  // GHASH/GCM intrinsics
+  if (has_vis3() && (UseVIS > 2)) {
+    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
+      UseGHASHIntrinsics = true;
+    }
+  } else if (UseGHASHIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
+      warning("GHASH intrinsics require VIS3 insructions support. Intriniscs will be disabled");
+    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
+  }
+
   // SHA1, SHA256, and SHA512 instructions were added to SPARC T-series at different times
   if (has_sha1() || has_sha256() || has_sha512()) {
     if (UseVIS > 0) { // SHA intrinsics use VIS1 instructions
@@ -352,6 +364,23 @@
     }
   }
 
+  // SPARC T4 and above should have support for CRC32C instruction
+  if (has_crc32c()) {
+    if (UseVIS > 2) { // CRC32C intrinsics use VIS3 instructions
+      if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
+        FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
+      }
+    } else {
+      if (UseCRC32CIntrinsics) {
+        warning("SPARC CRC32C intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
+        FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
+      }
+    }
+  } else if (UseCRC32CIntrinsics) {
+    warning("CRC32C instruction is not available on this CPU");
+    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
+  }
+
   if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
     (cache_line_size > ContendedPaddingWidth))
     ContendedPaddingWidth = cache_line_size;
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,8 @@
     aes_instructions     = 19,
     sha1_instruction     = 20,
     sha256_instruction   = 21,
-    sha512_instruction   = 22
+    sha512_instruction   = 22,
+    crc32c_instruction   = 23
   };
 
   enum Feature_Flag_Set {
@@ -83,6 +84,7 @@
     sha1_instruction_m      = 1 << sha1_instruction,
     sha256_instruction_m    = 1 << sha256_instruction,
     sha512_instruction_m    = 1 << sha512_instruction,
+    crc32c_instruction_m    = 1 << crc32c_instruction,
 
     generic_v8_m        = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
     generic_v9_m        = generic_v8_m | v9_instructions_m,
@@ -141,6 +143,7 @@
   static bool has_sha1()                { return (_features & sha1_instruction_m) != 0; }
   static bool has_sha256()              { return (_features & sha256_instruction_m) != 0; }
   static bool has_sha512()              { return (_features & sha512_instruction_m) != 0; }
+  static bool has_crc32c()              { return (_features & crc32c_instruction_m) != 0; }
 
   static bool supports_compare_and_exchange()
                                         { return has_v9(); }
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1347,7 +1347,7 @@
 
 void Assembler::andnl(Register dst, Register src1, Register src2) {
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  int encode = vex_prefix_0F38_and_encode(dst, src1, src2, false);
+  int encode = vex_prefix_0F38_and_encode_legacy(dst, src1, src2, false);
   emit_int8((unsigned char)0xF2);
   emit_int8((unsigned char)(0xC0 | encode));
 }
@@ -1355,7 +1355,7 @@
 void Assembler::andnl(Register dst, Register src1, Address src2) {
   InstructionMark im(this);
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  vex_prefix_0F38(dst, src1, src2, false);
+  vex_prefix_0F38_legacy(dst, src1, src2, false);
   emit_int8((unsigned char)0xF2);
   emit_operand(dst, src2);
 }
@@ -1382,7 +1382,7 @@
 
 void Assembler::blsil(Register dst, Register src) {
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  int encode = vex_prefix_0F38_and_encode(rbx, dst, src, false);
+  int encode = vex_prefix_0F38_and_encode_legacy(rbx, dst, src, false);
   emit_int8((unsigned char)0xF3);
   emit_int8((unsigned char)(0xC0 | encode));
 }
@@ -1390,14 +1390,14 @@
 void Assembler::blsil(Register dst, Address src) {
   InstructionMark im(this);
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  vex_prefix_0F38(rbx, dst, src, false);
+  vex_prefix_0F38_legacy(rbx, dst, src, false);
   emit_int8((unsigned char)0xF3);
   emit_operand(rbx, src);
 }
 
 void Assembler::blsmskl(Register dst, Register src) {
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  int encode = vex_prefix_0F38_and_encode(rdx, dst, src, false);
+  int encode = vex_prefix_0F38_and_encode_legacy(rdx, dst, src, false);
   emit_int8((unsigned char)0xF3);
   emit_int8((unsigned char)(0xC0 | encode));
 }
@@ -1412,7 +1412,7 @@
 
 void Assembler::blsrl(Register dst, Register src) {
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  int encode = vex_prefix_0F38_and_encode(rcx, dst, src, false);
+  int encode = vex_prefix_0F38_and_encode_legacy(rcx, dst, src, false);
   emit_int8((unsigned char)0xF3);
   emit_int8((unsigned char)(0xC0 | encode));
 }
@@ -1420,7 +1420,7 @@
 void Assembler::blsrl(Register dst, Address src) {
   InstructionMark im(this);
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  vex_prefix_0F38(rcx, dst, src, false);
+  vex_prefix_0F38_legacy(rcx, dst, src, false);
   emit_int8((unsigned char)0xF3);
   emit_operand(rcx, src);
 }
@@ -3095,8 +3095,16 @@
 void Assembler::psrldq(XMMRegister dst, int shift) {
   // Shift 128 bit value in xmm register by number of bytes.
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F,
-                                      false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
+  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
+  emit_int8(0x73);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(shift);
+}
+
+void Assembler::pslldq(XMMRegister dst, int shift) {
+  // Shift left 128 bit value in xmm register by number of bytes.
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
   emit_int8(0x73);
   emit_int8((unsigned char)(0xC0 | encode));
   emit_int8(shift);
@@ -3106,15 +3114,16 @@
   assert(VM_Version::supports_sse4_1(), "");
   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
   InstructionMark im(this);
-  simd_prefix(dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38);
+  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, false,
+              VEX_OPCODE_0F_38, false, AVX_128bit, true);
   emit_int8(0x17);
   emit_operand(dst, src);
 }
 
 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sse4_1(), "");
-  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
-                                      false, VEX_OPCODE_0F_38);
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false,
+                                      VEX_OPCODE_0F_38, false, AVX_128bit, true);
   emit_int8(0x17);
   emit_int8((unsigned char)(0xC0 | encode));
 }
@@ -3126,7 +3135,7 @@
   assert(dst != xnoreg, "sanity");
   int dst_enc = dst->encoding();
   // swap src<->dst for encoding
-  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
+  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len, true, false);
   emit_int8(0x17);
   emit_operand(dst, src);
 }
@@ -3135,7 +3144,7 @@
   assert(VM_Version::supports_avx(), "");
   int vector_len = AVX_256bit;
   int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
-                                     vector_len, VEX_OPCODE_0F_38);
+                                     vector_len, VEX_OPCODE_0F_38, true, false);
   emit_int8(0x17);
   emit_int8((unsigned char)(0xC0 | encode));
 }
@@ -3146,12 +3155,12 @@
   if (VM_Version::supports_evex()) {
     tuple_type = EVEX_FVM;
   }
-  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
+  emit_simd_arith(0x60, dst, src, VEX_SIMD_66, false, (VM_Version::supports_avx512vlbw() == false));
 }
 
 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
+  emit_simd_arith(0x60, dst, src, VEX_SIMD_66, false, (VM_Version::supports_avx512vlbw() == false));
 }
 
 void Assembler::punpckldq(XMMRegister dst, Address src) {
@@ -4979,7 +4988,51 @@
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
-// duplicate 4-bytes integer data from src into 8 locations in dest
+// duplicate 1-byte integer data from src into 16|32|64 locations in dest : requires AVX512BW and AVX512VL
+void Assembler::evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
+                                     vector_len, VEX_OPCODE_0F_38, false);
+  emit_int8(0x78);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::evpbroadcastb(XMMRegister dst, Address src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  tuple_type = EVEX_T1S;
+  input_size_in_bits = EVEX_8bit;
+  InstructionMark im(this);
+  assert(dst != xnoreg, "sanity");
+  int dst_enc = dst->encoding();
+  // swap src<->dst for encoding
+  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
+  emit_int8(0x78);
+  emit_operand(dst, src);
+}
+
+// duplicate 2-byte integer data from src into 8|16|32 locations in dest : requires AVX512BW and AVX512VL
+void Assembler::evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
+                                     vector_len, VEX_OPCODE_0F_38, false);
+  emit_int8(0x79);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::evpbroadcastw(XMMRegister dst, Address src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  tuple_type = EVEX_T1S;
+  input_size_in_bits = EVEX_16bit;
+  InstructionMark im(this);
+  assert(dst != xnoreg, "sanity");
+  int dst_enc = dst->encoding();
+  // swap src<->dst for encoding
+  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
+  emit_int8(0x79);
+  emit_operand(dst, src);
+}
+
+// duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL
 void Assembler::evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
   int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
@@ -4988,6 +5041,121 @@
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
+void Assembler::evpbroadcastd(XMMRegister dst, Address src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  tuple_type = EVEX_T1S;
+  input_size_in_bits = EVEX_32bit;
+  InstructionMark im(this);
+  assert(dst != xnoreg, "sanity");
+  int dst_enc = dst->encoding();
+  // swap src<->dst for encoding
+  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
+  emit_int8(0x58);
+  emit_operand(dst, src);
+}
+
+// duplicate 8-byte integer data from src into 2|4|8 locations in dest : requires AVX512VL
+void Assembler::evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
+                                     VEX_OPCODE_0F_38, true, vector_len, false, false);
+  emit_int8(0x59);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::evpbroadcastq(XMMRegister dst, Address src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  tuple_type = EVEX_T1S;
+  input_size_in_bits = EVEX_64bit;
+  InstructionMark im(this);
+  assert(dst != xnoreg, "sanity");
+  int dst_enc = dst->encoding();
+  // swap src<->dst for encoding
+  vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len);
+  emit_int8(0x59);
+  emit_operand(dst, src);
+}
+
+// duplicate single precision fp from src into 4|8|16 locations in dest : requires AVX512VL
+void Assembler::evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
+                                     VEX_OPCODE_0F_38, false, vector_len, false, false);
+  emit_int8(0x18);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::evpbroadcastss(XMMRegister dst, Address src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  tuple_type = EVEX_T1S;
+  input_size_in_bits = EVEX_32bit;
+  InstructionMark im(this);
+  assert(dst != xnoreg, "sanity");
+  int dst_enc = dst->encoding();
+  // swap src<->dst for encoding
+  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
+  emit_int8(0x18);
+  emit_operand(dst, src);
+}
+
+// duplicate double precision fp from src into 2|4|8 locations in dest : requires AVX512VL
+void Assembler::evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
+                                     VEX_OPCODE_0F_38, true, vector_len, false, false);
+  emit_int8(0x19);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::evpbroadcastsd(XMMRegister dst, Address src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  tuple_type = EVEX_T1S;
+  input_size_in_bits = EVEX_64bit;
+  InstructionMark im(this);
+  assert(dst != xnoreg, "sanity");
+  int dst_enc = dst->encoding();
+  // swap src<->dst for encoding
+  vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len);
+  emit_int8(0x19);
+  emit_operand(dst, src);
+}
+
+// duplicate 1-byte integer data from src into 16|32|64 locations in dest : requires AVX512BW and AVX512VL
+void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
+                                     VEX_OPCODE_0F_38, false, vector_len, false, false);
+  emit_int8(0x7A);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+// duplicate 2-byte integer data from src into 8|16|32 locations in dest : requires AVX512BW and AVX512VL
+void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
+                                     VEX_OPCODE_0F_38, false, vector_len, false, false);
+  emit_int8(0x7B);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+// duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL
+void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
+                                     VEX_OPCODE_0F_38, false, vector_len, false, false);
+  emit_int8(0x7C);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+// duplicate 8-byte integer data from src into 2|4|8 locations in dest : requires AVX512VL
+void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
+  assert(VM_Version::supports_evex(), "");
+  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
+                                     VEX_OPCODE_0F_38, true, vector_len, false, false);
+  emit_int8(0x7C);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
 // Carry-Less Multiplication Quadword
 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
   assert(VM_Version::supports_clmul(), "");
@@ -5598,7 +5766,7 @@
 
 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre,
                            VexOpcode opc, bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg) {
-  bool vex_r = (xreg_enc >= 8);
+  bool vex_r = ((xreg_enc & 8) == 8);
   bool vex_b = adr.base_needs_rex();
   bool vex_x = adr.index_needs_rex();
   avx_vector_len = vector_len;
@@ -5626,8 +5794,8 @@
 
 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
                                      bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg ) {
-  bool vex_r = (dst_enc >= 8);
-  bool vex_b = (src_enc >= 8);
+  bool vex_r = ((dst_enc & 8) == 8);
+  bool vex_b = ((src_enc & 8) == 8);
   bool vex_x = false;
   avx_vector_len = vector_len;
 
@@ -6272,19 +6440,15 @@
 
 void Assembler::andnq(Register dst, Register src1, Register src2) {
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  int encode = vex_prefix_0F38_and_encode_q(dst, src1, src2);
+  int encode = vex_prefix_0F38_and_encode_q_legacy(dst, src1, src2);
   emit_int8((unsigned char)0xF2);
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
 void Assembler::andnq(Register dst, Register src1, Address src2) {
-  if (VM_Version::supports_evex()) {
-    tuple_type = EVEX_T1S;
-    input_size_in_bits = EVEX_64bit;
-  }
   InstructionMark im(this);
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  vex_prefix_0F38_q(dst, src1, src2);
+  vex_prefix_0F38_q_legacy(dst, src1, src2);
   emit_int8((unsigned char)0xF2);
   emit_operand(dst, src2);
 }
@@ -6311,7 +6475,7 @@
 
 void Assembler::blsiq(Register dst, Register src) {
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  int encode = vex_prefix_0F38_and_encode_q(rbx, dst, src);
+  int encode = vex_prefix_0F38_and_encode_q_legacy(rbx, dst, src);
   emit_int8((unsigned char)0xF3);
   emit_int8((unsigned char)(0xC0 | encode));
 }
@@ -6319,14 +6483,14 @@
 void Assembler::blsiq(Register dst, Address src) {
   InstructionMark im(this);
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  vex_prefix_0F38_q(rbx, dst, src);
+  vex_prefix_0F38_q_legacy(rbx, dst, src);
   emit_int8((unsigned char)0xF3);
   emit_operand(rbx, src);
 }
 
 void Assembler::blsmskq(Register dst, Register src) {
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  int encode = vex_prefix_0F38_and_encode_q(rdx, dst, src);
+  int encode = vex_prefix_0F38_and_encode_q_legacy(rdx, dst, src);
   emit_int8((unsigned char)0xF3);
   emit_int8((unsigned char)(0xC0 | encode));
 }
@@ -6334,14 +6498,14 @@
 void Assembler::blsmskq(Register dst, Address src) {
   InstructionMark im(this);
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  vex_prefix_0F38_q(rdx, dst, src);
+  vex_prefix_0F38_q_legacy(rdx, dst, src);
   emit_int8((unsigned char)0xF3);
   emit_operand(rdx, src);
 }
 
 void Assembler::blsrq(Register dst, Register src) {
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  int encode = vex_prefix_0F38_and_encode_q(rcx, dst, src);
+  int encode = vex_prefix_0F38_and_encode_q_legacy(rcx, dst, src);
   emit_int8((unsigned char)0xF3);
   emit_int8((unsigned char)(0xC0 | encode));
 }
@@ -6349,7 +6513,7 @@
 void Assembler::blsrq(Register dst, Address src) {
   InstructionMark im(this);
   assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
-  vex_prefix_0F38_q(rcx, dst, src);
+  vex_prefix_0F38_q_legacy(rcx, dst, src);
   emit_int8((unsigned char)0xF3);
   emit_operand(rcx, src);
 }
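
The vex_r/vex_b computations change from a ">= 8" test to a bit-3 test because
AVX-512 widens register encodings to five bits (xmm16..xmm31): VEX/EVEX.R
carries bit 3 of the register number and the separate EVEX.R' bit carries
bit 4, so "enc >= 8" misclassifies encodings 16..23, whose bit 3 is clear.
A minimal sketch of the split (assuming 5-bit encodings):

    // enc in 0..31 once the AVX-512 register bank is in play
    bool r       = (enc & 0x08) != 0;  // -> VEX.R / EVEX.R (stored inverted)
    bool r_prime = (enc & 0x10) != 0;  // -> EVEX.R' (EVEX only)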
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -661,6 +661,14 @@
                vector_len, no_mask_reg);
   }
 
+  void vex_prefix_0F38_legacy(Register dst, Register nds, Address src, bool no_mask_reg = false) {
+    bool vex_w = false;
+    int vector_len = AVX_128bit;
+    vex_prefix(src, nds->encoding(), dst->encoding(),
+               VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
+               vector_len, true, no_mask_reg);
+  }
+
   void vex_prefix_0F38_q(Register dst, Register nds, Address src, bool no_mask_reg = false) {
     bool vex_w = true;
     int vector_len = AVX_128bit;
@@ -668,6 +676,15 @@
                VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
                vector_len, no_mask_reg);
   }
+
+  void vex_prefix_0F38_q_legacy(Register dst, Register nds, Address src, bool no_mask_reg = false) {
+    bool vex_w = true;
+    int vector_len = AVX_128bit;
+    vex_prefix(src, nds->encoding(), dst->encoding(),
+               VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
+               vector_len, true, no_mask_reg);
+  }
+
   int  vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
                              VexSimdPrefix pre, VexOpcode opc,
                              bool vex_w, int vector_len,
@@ -680,6 +697,15 @@
                                  VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
                                  false, no_mask_reg);
   }
+
+  int  vex_prefix_0F38_and_encode_legacy(Register dst, Register nds, Register src, bool no_mask_reg = false) {
+    bool vex_w = false;
+    int vector_len = AVX_128bit;
+    return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
+                                 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
+                                 true, no_mask_reg);
+  }
+
   int  vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src, bool no_mask_reg = false) {
     bool vex_w = true;
     int vector_len = AVX_128bit;
@@ -687,6 +713,15 @@
                                  VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
                                  false, no_mask_reg);
   }
+
+  int  vex_prefix_0F38_and_encode_q_legacy(Register dst, Register nds, Register src, bool no_mask_reg = false) {
+    bool vex_w = true;
+    int vector_len = AVX_128bit;
+    return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
+                                 VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
+                                 true, no_mask_reg);
+  }
+
   int  vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
                              VexSimdPrefix pre, int vector_len = AVX_128bit,
                              VexOpcode opc = VEX_OPCODE_0F, bool legacy_mode = false,
@@ -1666,6 +1701,8 @@
 
   // Shift Right by bytes Logical DoubleQuadword Immediate
   void psrldq(XMMRegister dst, int shift);
+  // Shift Left by bytes Logical DoubleQuadword Immediate
+  void pslldq(XMMRegister dst, int shift);
 
   // Logical Compare 128bit
   void ptest(XMMRegister dst, XMMRegister src);
@@ -2024,8 +2061,25 @@
   // duplicate 4-bytes integer data from src into 8 locations in dest
   void vpbroadcastd(XMMRegister dst, XMMRegister src);
 
-  // duplicate 4-bytes integer data from src into vector_len locations in dest
+  // duplicate n-byte integer data from src into vector_len locations in dest
+  void evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len);
+  void evpbroadcastb(XMMRegister dst, Address src, int vector_len);
+  void evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
+  void evpbroadcastw(XMMRegister dst, Address src, int vector_len);
   void evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len);
+  void evpbroadcastd(XMMRegister dst, Address src, int vector_len);
+  void evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len);
+  void evpbroadcastq(XMMRegister dst, Address src, int vector_len);
+
+  void evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
+  void evpbroadcastss(XMMRegister dst, Address src, int vector_len);
+  void evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
+  void evpbroadcastsd(XMMRegister dst, Address src, int vector_len);
+
+  void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
+  void evpbroadcastw(XMMRegister dst, Register src, int vector_len);
+  void evpbroadcastd(XMMRegister dst, Register src, int vector_len);
+  void evpbroadcastq(XMMRegister dst, Register src, int vector_len);
 
   // Carry-Less Multiplication Quadword
   void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
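
The evpbroadcast family declared above gives stub and intrinsic generators a
one-instruction way to splat a scalar (from an XMM register, memory, or a
general register) across every lane of a vector register, at element widths of
1 to 8 bytes. A hypothetical usage sketch inside a MacroAssembler-based stub,
assuming the AVX_512bit vector_len constant used elsewhere in this assembler:

    // fill all sixteen 32-bit lanes of a 512-bit register from rax
    __ evpbroadcastd(xmm0, rax, Assembler::AVX_512bit);
    // fill all eight 64-bit double lanes from the scalar in xmm1
    __ evpbroadcastsd(xmm2, xmm1, Assembler::AVX_512bit);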
--- a/hotspot/src/cpu/x86/vm/c2_init_x86.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/c2_init_x86.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -58,4 +58,6 @@
       OptoReg::invalidate(i);
     }
   }
+
+  SuperWordLoopUnrollAnalysis = true;
 }
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -365,22 +365,22 @@
     map->set_callee_saved(STACK_OFFSET(xmm14H_off), xmm14->as_VMReg()->next());
     map->set_callee_saved(STACK_OFFSET(xmm15H_off), xmm15->as_VMReg()->next());
     if (UseAVX > 2) {
-      map->set_callee_saved(STACK_OFFSET(xmm16H_off), xmm16->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm17H_off), xmm17->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm18H_off), xmm18->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm19H_off), xmm19->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm20H_off), xmm20->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm21H_off), xmm21->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm22H_off), xmm22->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm23H_off), xmm23->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm24H_off), xmm24->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm25H_off), xmm25->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm26H_off), xmm26->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm27H_off), xmm27->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm28H_off), xmm28->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm29H_off), xmm29->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm30H_off), xmm30->as_VMReg());
-      map->set_callee_saved(STACK_OFFSET(xmm31H_off), xmm31->as_VMReg());
+      map->set_callee_saved(STACK_OFFSET(xmm16H_off), xmm16->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm17H_off), xmm17->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm18H_off), xmm18->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm19H_off), xmm19->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm20H_off), xmm20->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm21H_off), xmm21->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm22H_off), xmm22->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm23H_off), xmm23->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm24H_off), xmm24->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm25H_off), xmm25->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm26H_off), xmm26->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm27H_off), xmm27->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm28H_off), xmm28->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm29H_off), xmm29->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm30H_off), xmm30->as_VMReg()->next());
+      map->set_callee_saved(STACK_OFFSET(xmm31H_off), xmm31->as_VMReg()->next());
     }
   }
 
@@ -466,7 +466,7 @@
       __ vinsertf64x4h(xmm29, Address(rsp, 928));
       __ vinsertf64x4h(xmm30, Address(rsp, 960));
       __ vinsertf64x4h(xmm31, Address(rsp, 992));
-      __ subptr(rsp, 1024);
+      __ addptr(rsp, 1024);
     }
   }
 #else
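
The subptr-to-addptr change is a straight bug fix in the restore path: the
matching save path reserves the 1024-byte area (32 bytes for the upper 256
bits of each of the 32 ZMM registers) with subptr(rsp, 1024), so the restore
must release it with addptr; subtracting again would leak another 1KB of stack
on every restore. The intended pairing, as a sketch:

    __ subptr(rsp, 1024);   // save: reserve the upper-half spill area
    // ... vextractf64x4h stores to rsp+0 .. rsp+992 ...
    // ... later: vinsertf64x4h loads from the same offsets ...
    __ addptr(rsp, 1024);   // restore: release the spill area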
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -2727,6 +2727,167 @@
     return start;
   }
 
+  // pshufb mask that swaps the two 64-bit longs of a 128-bit value
+  address generate_ghash_long_swap_mask() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
+    address start = __ pc();
+    __ emit_data(0x0b0a0908, relocInfo::none, 0);
+    __ emit_data(0x0f0e0d0c, relocInfo::none, 0);
+    __ emit_data(0x03020100, relocInfo::none, 0);
+    __ emit_data(0x07060504, relocInfo::none, 0);
+
+    return start;
+  }
+
+  // pshufb mask that reverses the byte order of a 16-byte array
+  address generate_ghash_byte_swap_mask() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
+    address start = __ pc();
+    __ emit_data(0x0c0d0e0f, relocInfo::none, 0);
+    __ emit_data(0x08090a0b, relocInfo::none, 0);
+    __ emit_data(0x04050607, relocInfo::none, 0);
+    __ emit_data(0x00010203, relocInfo::none, 0);
+    return start;
+  }
+
+  /* Single and multi-block ghash operations */
+  address generate_ghash_processBlocks() {
+    assert(UseGHASHIntrinsics, "need GHASH intrinsics and CLMUL support");
+    __ align(CodeEntryAlignment);
+    Label L_ghash_loop, L_exit;
+    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
+    address start = __ pc();
+
+    const Register state        = rdi;
+    const Register subkeyH      = rsi;
+    const Register data         = rdx;
+    const Register blocks       = rcx;
+
+    const Address  state_param(rbp, 8+0);
+    const Address  subkeyH_param(rbp, 8+4);
+    const Address  data_param(rbp, 8+8);
+    const Address  blocks_param(rbp, 8+12);
+
+    const XMMRegister xmm_temp0 = xmm0;
+    const XMMRegister xmm_temp1 = xmm1;
+    const XMMRegister xmm_temp2 = xmm2;
+    const XMMRegister xmm_temp3 = xmm3;
+    const XMMRegister xmm_temp4 = xmm4;
+    const XMMRegister xmm_temp5 = xmm5;
+    const XMMRegister xmm_temp6 = xmm6;
+    const XMMRegister xmm_temp7 = xmm7;
+
+    __ enter();
+
+    __ movptr(state, state_param);
+    __ movptr(subkeyH, subkeyH_param);
+    __ movptr(data, data_param);
+    __ movptr(blocks, blocks_param);
+
+    __ movdqu(xmm_temp0, Address(state, 0));
+    __ pshufb(xmm_temp0, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
+
+    __ movdqu(xmm_temp1, Address(subkeyH, 0));
+    __ pshufb(xmm_temp1, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
+
+    __ BIND(L_ghash_loop);
+    __ movdqu(xmm_temp2, Address(data, 0));
+    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));
+
+    __ pxor(xmm_temp0, xmm_temp2);
+
+    //
+    // Multiply with the hash key
+    //
+    __ movdqu(xmm_temp3, xmm_temp0);
+    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);      // xmm3 holds a0*b0
+    __ movdqu(xmm_temp4, xmm_temp0);
+    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);     // xmm4 holds a0*b1
+
+    __ movdqu(xmm_temp5, xmm_temp0);
+    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);      // xmm5 holds a1*b0
+    __ movdqu(xmm_temp6, xmm_temp0);
+    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);     // xmm6 holds a1*b1
+
+    __ pxor(xmm_temp4, xmm_temp5);      // xmm4 holds a0*b1 + a1*b0
+
+    __ movdqu(xmm_temp5, xmm_temp4);    // move the contents of xmm4 to xmm5
+    __ psrldq(xmm_temp4, 8);    // shift xmm4 right by 64 bits (8 bytes)
+    __ pslldq(xmm_temp5, 8);    // shift xmm5 left by 64 bits (8 bytes)
+    __ pxor(xmm_temp3, xmm_temp5);
+    __ pxor(xmm_temp6, xmm_temp4);      // Register pair <xmm6:xmm3> holds the result
+                                        // of the carry-less multiplication of
+                                        // xmm0 by xmm1.
+
+    // We shift the result of the multiplication by one bit position
+    // to the left to compensate for the fact that the bits are reversed.
+    __ movdqu(xmm_temp7, xmm_temp3);
+    __ movdqu(xmm_temp4, xmm_temp6);
+    __ pslld(xmm_temp3, 1);
+    __ pslld(xmm_temp6, 1);
+    __ psrld(xmm_temp7, 31);
+    __ psrld(xmm_temp4, 31);
+    __ movdqu(xmm_temp5, xmm_temp7);
+    __ pslldq(xmm_temp4, 4);
+    __ pslldq(xmm_temp7, 4);
+    __ psrldq(xmm_temp5, 12);
+    __ por(xmm_temp3, xmm_temp7);
+    __ por(xmm_temp6, xmm_temp4);
+    __ por(xmm_temp6, xmm_temp5);
+
+    //
+    // First phase of the reduction
+    //
+    // Move xmm3 into xmm4, xmm5, xmm7 in order to perform the shifts
+    // independently.
+    __ movdqu(xmm_temp7, xmm_temp3);
+    __ movdqu(xmm_temp4, xmm_temp3);
+    __ movdqu(xmm_temp5, xmm_temp3);
+    __ pslld(xmm_temp7, 31);    // packed left shift, << 31
+    __ pslld(xmm_temp4, 30);    // packed left shift, << 30
+    __ pslld(xmm_temp5, 25);    // packed left shift, << 25
+    __ pxor(xmm_temp7, xmm_temp4);      // xor the shifted versions
+    __ pxor(xmm_temp7, xmm_temp5);
+    __ movdqu(xmm_temp4, xmm_temp7);
+    __ pslldq(xmm_temp7, 12);
+    __ psrldq(xmm_temp4, 4);
+    __ pxor(xmm_temp3, xmm_temp7);      // first phase of the reduction complete
+
+    //
+    // Second phase of the reduction
+    //
+    // Make 3 copies of xmm3 in xmm2, xmm5, xmm7 for doing these
+    // shift operations.
+    __ movdqu(xmm_temp2, xmm_temp3);
+    __ movdqu(xmm_temp7, xmm_temp3);
+    __ movdqu(xmm_temp5, xmm_temp3);
+    __ psrld(xmm_temp2, 1);     // packed right shift, >> 1
+    __ psrld(xmm_temp7, 2);     // packed right shift, >> 2
+    __ psrld(xmm_temp5, 7);     // packed right shift, >> 7
+    __ pxor(xmm_temp2, xmm_temp7);      // xor the shifted versions
+    __ pxor(xmm_temp2, xmm_temp5);
+    __ pxor(xmm_temp2, xmm_temp4);
+    __ pxor(xmm_temp3, xmm_temp2);
+    __ pxor(xmm_temp6, xmm_temp3);      // the result is in xmm6
+
+    __ decrement(blocks);
+    __ jcc(Assembler::zero, L_exit);
+    __ movdqu(xmm_temp0, xmm_temp6);
+    __ addptr(data, 16);
+    __ jmp(L_ghash_loop);
+
+    __ BIND(L_exit);
+    // Byte swap 16-byte result
+    __ pshufb(xmm_temp6, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
+    __ movdqu(Address(state, 0), xmm_temp6);   // store the result
+
+    __ leave();
+    __ ret(0);
+    return start;
+  }
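
  [Aside, not part of the patch: one trip through L_ghash_loop computes
  state <- (state ^ block) * H in GF(2^128) under GHASH's reflected bit
  order (bit 0 is the most significant bit of byte 0). A minimal bit-serial
  sketch of that step, assuming invented names (be128, ghash_block_sketch):

      #include <stdint.h>

      struct be128 { uint64_t hi, lo; };  // hi = bits 0..63, lo = bits 64..127

      static void ghash_block_sketch(be128* state, be128 h, be128 blk) {
        state->hi ^= blk.hi;
        state->lo ^= blk.lo;
        be128 x = *state, v = h, z = { 0, 0 };
        for (int i = 0; i < 128; i++) {
          // bit i of x, bit 0 being the most significant
          uint64_t bit = (i < 64) ? (x.hi >> (63 - i)) : (x.lo >> (127 - i));
          if (bit & 1) { z.hi ^= v.hi; z.lo ^= v.lo; }
          uint64_t carry = v.lo & 1;          // v's bit 127
          v.lo = (v.lo >> 1) | (v.hi << 63);  // v <- v * x
          v.hi >>= 1;
          if (carry) v.hi ^= 0xe100000000000000ULL;  // fold x^128 back via g(x)
        }
        *state = z;
      }

  The stub reaches the same value with four pclmulqdq partial products plus
  the shift-and-reduce sequence in place of this 128-iteration loop.]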
+
   /**
    *  Arguments:
    *
@@ -3026,6 +3187,13 @@
       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
     }
 
+    // Generate GHASH intrinsics code
+    if (UseGHASHIntrinsics) {
+      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
+      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
+      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
+    }
+
     // Safefetch stubs.
     generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                    &StubRoutines::_safefetch32_fault_pc,
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -382,7 +382,12 @@
 
     // restore regs belonging to calling function
 #ifdef _WIN64
-    for (int i = 15; i >= 6; i--) {
+    int xmm_ub = 15;
+    if (UseAVX > 2) {
+      xmm_ub = 31;
+    }
+    // emit the restores for xmm regs
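+    // with UseAVX > 2 the stub prologues are assumed to have saved
+    // xmm6..xmm31, so mirror that range here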
+    for (int i = 6; i <= xmm_ub; i++) {
       __ movdqu(as_XMMRegister(i), xmm_save(i));
     }
 #endif
@@ -3681,6 +3686,175 @@
     return start;
   }
 
+
+  // pshufb mask that swaps the two 64-bit halves (longs) of a 128-bit value
+  address generate_ghash_long_swap_mask() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
+    address start = __ pc();
+    __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none );
+    __ emit_data64(0x0706050403020100, relocInfo::none );
+    return start;
+  }
+
+  // pshufb mask that reverses all 16 bytes of a 128-bit value
+  address generate_ghash_byte_swap_mask() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
+    address start = __ pc();
+    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none );
+    __ emit_data64(0x0001020304050607, relocInfo::none );
+    return start;
+  }
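
  [Aside, not part of the patch: both tables feed pshufb, whose effect is
  dst[i] = src[mask[i]], zeroing when bit 7 of the selector is set. Since
  emit_data64 emits the low qword first (little-endian), the first mask
  exchanges the two 64-bit halves and the second reverses all 16 bytes.
  A sketch of that semantics, with the invented name pshufb_sketch:

      #include <stdint.h>

      // dst[i] = src[mask[i] & 0x0f], or 0 when the selector's top bit is set
      static void pshufb_sketch(uint8_t dst[16], const uint8_t src[16],
                                const uint8_t mask[16]) {
        for (int i = 0; i < 16; i++)
          dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0f];
      }

  Swapping halves is self-inverse, which is why the same long-swap mask both
  prepares the state on entry and restores it before the final store.]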
+
+  /* Single and multi-block ghash operations */
+  address generate_ghash_processBlocks() {
+    __ align(CodeEntryAlignment);
+    Label L_ghash_loop, L_exit;
+    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
+    address start = __ pc();
+
+    const Register state        = c_rarg0;
+    const Register subkeyH      = c_rarg1;
+    const Register data         = c_rarg2;
+    const Register blocks       = c_rarg3;
+
+#ifdef _WIN64
+    const int XMM_REG_LAST  = 10;
+#endif
+
+    const XMMRegister xmm_temp0 = xmm0;
+    const XMMRegister xmm_temp1 = xmm1;
+    const XMMRegister xmm_temp2 = xmm2;
+    const XMMRegister xmm_temp3 = xmm3;
+    const XMMRegister xmm_temp4 = xmm4;
+    const XMMRegister xmm_temp5 = xmm5;
+    const XMMRegister xmm_temp6 = xmm6;
+    const XMMRegister xmm_temp7 = xmm7;
+    const XMMRegister xmm_temp8 = xmm8;
+    const XMMRegister xmm_temp9 = xmm9;
+    const XMMRegister xmm_temp10 = xmm10;
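+    // xmm8 and above exist only in 64-bit mode; that headroom lets this
+    // variant keep the long-swap mask resident in xmm10 rather than
+    // re-materializing it from memory on every use.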
+
+    __ enter();
+
+#ifdef _WIN64
+    // save the xmm registers which must be preserved (xmm6-xmm10)
+    __ subptr(rsp, -rsp_after_call_off * wordSize);
+    for (int i = 6; i <= XMM_REG_LAST; i++) {
+      __ movdqu(xmm_save(i), as_XMMRegister(i));
+    }
+#endif
+
+    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
+
+    __ movdqu(xmm_temp0, Address(state, 0));
+    __ pshufb(xmm_temp0, xmm_temp10);
+
+    __ BIND(L_ghash_loop);
+    __ movdqu(xmm_temp2, Address(data, 0));
+    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));
+
+    __ movdqu(xmm_temp1, Address(subkeyH, 0));
+    __ pshufb(xmm_temp1, xmm_temp10);
+
+    __ pxor(xmm_temp0, xmm_temp2);
+
+    //
+    // Multiply with the hash key
+    //
+    __ movdqu(xmm_temp3, xmm_temp0);
+    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);      // xmm3 holds a0*b0
+    __ movdqu(xmm_temp4, xmm_temp0);
+    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);     // xmm4 holds a0*b1
+
+    __ movdqu(xmm_temp5, xmm_temp0);
+    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);      // xmm5 holds a1*b0
+    __ movdqu(xmm_temp6, xmm_temp0);
+    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);     // xmm6 holds a1*b1
+
+    __ pxor(xmm_temp4, xmm_temp5);      // xmm4 holds a0*b1 + a1*b0
+
+    __ movdqu(xmm_temp5, xmm_temp4);    // copy xmm4 into xmm5
+    __ psrldq(xmm_temp4, 8);    // shift xmm4 right by 64 bits
+    __ pslldq(xmm_temp5, 8);    // shift xmm5 left by 64 bits
+    __ pxor(xmm_temp3, xmm_temp5);
+    __ pxor(xmm_temp6, xmm_temp4);      // Register pair <xmm6:xmm3> holds the result
+                                        // of the carry-less multiplication of
+                                        // xmm0 by xmm1.
+
+    // We shift the result of the multiplication by one bit position
+    // to the left to cope with the fact that the bits are reversed.
+    __ movdqu(xmm_temp7, xmm_temp3);
+    __ movdqu(xmm_temp8, xmm_temp6);
+    __ pslld(xmm_temp3, 1);
+    __ pslld(xmm_temp6, 1);
+    __ psrld(xmm_temp7, 31);
+    __ psrld(xmm_temp8, 31);
+    __ movdqu(xmm_temp9, xmm_temp7);
+    __ pslldq(xmm_temp8, 4);
+    __ pslldq(xmm_temp7, 4);
+    __ psrldq(xmm_temp9, 12);
+    __ por(xmm_temp3, xmm_temp7);
+    __ por(xmm_temp6, xmm_temp8);
+    __ por(xmm_temp6, xmm_temp9);
+
+    //
+    // First phase of the reduction
+    //
+    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
+    // independently.
+    __ movdqu(xmm_temp7, xmm_temp3);
+    __ movdqu(xmm_temp8, xmm_temp3);
+    __ movdqu(xmm_temp9, xmm_temp3);
+    __ pslld(xmm_temp7, 31);    // packed left shift by 31
+    __ pslld(xmm_temp8, 30);    // packed left shift by 30
+    __ pslld(xmm_temp9, 25);    // packed left shift by 25
+    __ pxor(xmm_temp7, xmm_temp8);      // xor the shifted versions
+    __ pxor(xmm_temp7, xmm_temp9);
+    __ movdqu(xmm_temp8, xmm_temp7);
+    __ pslldq(xmm_temp7, 12);
+    __ psrldq(xmm_temp8, 4);
+    __ pxor(xmm_temp3, xmm_temp7);      // first phase of the reduction complete
+
+    //
+    // Second phase of the reduction
+    //
+    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
+    // shift operations.
+    __ movdqu(xmm_temp2, xmm_temp3);
+    __ movdqu(xmm_temp4, xmm_temp3);
+    __ movdqu(xmm_temp5, xmm_temp3);
+    __ psrld(xmm_temp2, 1);     // packed right shift by 1
+    __ psrld(xmm_temp4, 2);     // packed right shift by 2
+    __ psrld(xmm_temp5, 7);     // packed right shift by 7
+    __ pxor(xmm_temp2, xmm_temp4);      // xor the shifted versions
+    __ pxor(xmm_temp2, xmm_temp5);
+    __ pxor(xmm_temp2, xmm_temp8);
+    __ pxor(xmm_temp3, xmm_temp2);
+    __ pxor(xmm_temp6, xmm_temp3);      // the result is in xmm6
+
+    __ decrement(blocks);
+    __ jcc(Assembler::zero, L_exit);
+    __ movdqu(xmm_temp0, xmm_temp6);
+    __ addptr(data, 16);
+    __ jmp(L_ghash_loop);
+
+    __ BIND(L_exit);
+    __ pshufb(xmm_temp6, xmm_temp10);          // Byte swap 16-byte result
+    __ movdqu(Address(state, 0), xmm_temp6);   // store the result
+
+#ifdef _WIN64
+    // restore xmm regs belonging to calling function
+    for (int i = 6; i <= XMM_REG_LAST; i++) {
+      __ movdqu(as_XMMRegister(i), xmm_save(i));
+    }
+#endif
+    __ leave();
+    __ ret(0);
+    return start;
+  }
+
   /**
    *  Arguments:
    *
@@ -4120,6 +4294,13 @@
       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
     }
 
+    // Generate GHASH intrinsics code
+    if (UseGHASHIntrinsics) {
+      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
+      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
+      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
+    }
+
     // Safefetch stubs.
     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                        &StubRoutines::_safefetch32_fault_pc,
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
 
 address StubRoutines::x86::_verify_mxcsr_entry = NULL;
 address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
+address StubRoutines::x86::_ghash_long_swap_mask_addr = NULL;
+address StubRoutines::x86::_ghash_byte_swap_mask_addr = NULL;
 
 uint64_t StubRoutines::x86::_crc_by128_masks[] =
 {
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,10 +36,15 @@
   // masks and table for CRC32
   static uint64_t _crc_by128_masks[];
   static juint    _crc_table[];
+  // byte/long swap masks for GHASH
+  static address _ghash_long_swap_mask_addr;
+  static address _ghash_byte_swap_mask_addr;
 
  public:
   static address verify_mxcsr_entry()    { return _verify_mxcsr_entry; }
   static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; }
   static address crc_by128_masks_addr()  { return (address)_crc_by128_masks; }
+  static address ghash_long_swap_mask_addr() { return _ghash_long_swap_mask_addr; }
+  static address ghash_byte_swap_mask_addr() { return _ghash_byte_swap_mask_addr; }
 
 #endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -33,7 +33,7 @@
 
 enum platform_dependent_constants {
   code_size1 = 19000,          // simply increase if too small (assembler will crash if too small)
-  code_size2 = 23000           // simply increase if too small (assembler will crash if too small)
+  code_size2 = 24000           // simply increase if too small (assembler will crash if too small)
 };
 
 class x86 {
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -677,6 +677,17 @@
     FLAG_SET_DEFAULT(UseAESIntrinsics, false);
   }
 
+  // GHASH/GCM intrinsics
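+  // Auto-enable when the CPU has CLMUL and UseSSE > 2; if the flag was set
+  // explicitly on hardware without that support, warn and turn it off.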
+  if (UseCLMUL && (UseSSE > 2)) {
+    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
+      UseGHASHIntrinsics = true;
+    }
+  } else if (UseGHASHIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
+      warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU");
+    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
+  }
+
   if (UseSHA) {
     warning("SHA instructions are not available on this CPU");
     FLAG_SET_DEFAULT(UseSHA, false);
@@ -688,6 +699,12 @@
     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
   }
 
+  if (UseCRC32CIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
+      warning("CRC32C intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
+  }
+
   // Adjust RTM (Restricted Transactional Memory) flags
   if (!supports_rtm() && UseRTMLocking) {
     // Can't continue because UseRTMLocking affects UseBiasedLocking flag
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -702,6 +702,7 @@
   static bool supports_avx512cd() { return (_cpuFeatures & CPU_AVX512CD) != 0; }
   static bool supports_avx512bw() { return (_cpuFeatures & CPU_AVX512BW) != 0; }
   static bool supports_avx512vl() { return (_cpuFeatures & CPU_AVX512VL) != 0; }
+  static bool supports_avx512vlbw() { return (supports_avx512bw() && supports_avx512vl()); }
   // Intel features
   static bool is_intel_family_core() { return is_intel() &&
                                        extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
--- a/hotspot/src/cpu/x86/vm/x86.ad	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/x86.ad	Thu Jul 02 14:20:36 2015 -0700
@@ -2894,6 +2894,457 @@
   ins_pipe( pipe_slow );
 %}
 
+// ====================LEGACY REPLICATE=======================================
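+// Legacy rules below match only when AVX-512 VL/BW is unavailable; the
+// GENERIC rules further down apply on all hardware, and the EVEX rules at
+// the end take over on AVX-512 parts with a single vpbroadcast per replicate.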
+
+instruct Repl4B_mem(vecS dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB (LoadB mem)));
+  format %{ "punpcklbw $dst,$mem\n\t"
+            "pshuflw $dst,$dst,0x00\t! replicate4B" %}
+  ins_encode %{
+    __ punpcklbw($dst$$XMMRegister, $mem$$Address);
+    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8B_mem(vecD dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 0 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB (LoadB mem)));
+  format %{ "punpcklbw $dst,$mem\n\t"
+            "pshuflw $dst,$dst,0x00\t! replicate8B" %}
+  ins_encode %{
+    __ punpcklbw($dst$$XMMRegister, $mem$$Address);
+    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16B(vecX dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB src));
+  format %{ "movd    $dst,$src\n\t"
+            "punpcklbw $dst,$dst\n\t"
+            "pshuflw $dst,$dst,0x00\n\t"
+            "punpcklqdq $dst,$dst\t! replicate16B" %}
+  ins_encode %{
+    __ movdl($dst$$XMMRegister, $src$$Register);
+    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
+    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16B_mem(vecX dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 16 && UseAVX > 0 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB (LoadB mem)));
+  format %{ "punpcklbw $dst,$mem\n\t"
+            "pshuflw $dst,$dst,0x00\n\t"
+            "punpcklqdq $dst,$dst\t! replicate16B" %}
+  ins_encode %{
+    __ punpcklbw($dst$$XMMRegister, $mem$$Address);
+    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl32B(vecY dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB src));
+  format %{ "movd    $dst,$src\n\t"
+            "punpcklbw $dst,$dst\n\t"
+            "pshuflw $dst,$dst,0x00\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate32B" %}
+  ins_encode %{
+    __ movdl($dst$$XMMRegister, $src$$Register);
+    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
+    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl32B_mem(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB (LoadB mem)));
+  format %{ "punpcklbw $dst,$mem\n\t"
+            "pshuflw $dst,$dst,0x00\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate32B" %}
+  ins_encode %{
+    __ punpcklbw($dst$$XMMRegister, $mem$$Address);
+    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16B_imm(vecX dst, immI con) %{
+  predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "punpcklqdq $dst,$dst\t! replicate16B($con)" %}
+  ins_encode %{
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl32B_imm(vecY dst, immI con) %{
+  predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst\t! lreplicate32B($con)" %}
+  ins_encode %{
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4S(vecD dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS src));
+  format %{ "movd    $dst,$src\n\t"
+            "pshuflw $dst,$dst,0x00\t! replicate4S" %}
+  ins_encode %{
+    __ movdl($dst$$XMMRegister, $src$$Register);
+    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4S_mem(vecD dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS (LoadS mem)));
+  format %{ "pshuflw $dst,$mem,0x00\t! replicate4S" %}
+  ins_encode %{
+    __ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8S(vecX dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS src));
+  format %{ "movd    $dst,$src\n\t"
+            "pshuflw $dst,$dst,0x00\n\t"
+            "punpcklqdq $dst,$dst\t! replicate8S" %}
+  ins_encode %{
+    __ movdl($dst$$XMMRegister, $src$$Register);
+    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8S_mem(vecX dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 0 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS (LoadS mem)));
+  format %{ "pshuflw $dst,$mem,0x00\n\t"
+            "punpcklqdq $dst,$dst\t! replicate8S" %}
+  ins_encode %{
+    __ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8S_imm(vecX dst, immI con) %{
+  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "punpcklqdq $dst,$dst\t! replicate8S($con)" %}
+  ins_encode %{
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16S(vecY dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS src));
+  format %{ "movd    $dst,$src\n\t"
+            "pshuflw $dst,$dst,0x00\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate16S" %}
+  ins_encode %{
+    __ movdl($dst$$XMMRegister, $src$$Register);
+    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16S_mem(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS (LoadS mem)));
+  format %{ "pshuflw $dst,$mem,0x00\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate16S" %}
+  ins_encode %{
+    __ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16S_imm(vecY dst, immI con) %{
+  predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate16S($con)" %}
+  ins_encode %{
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4I(vecX dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI src));
+  format %{ "movd    $dst,$src\n\t"
+            "pshufd  $dst,$dst,0x00\t! replicate4I" %}
+  ins_encode %{
+    __ movdl($dst$$XMMRegister, $src$$Register);
+    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4I_mem(vecX dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI (LoadI mem)));
+  format %{ "pshufd  $dst,$mem,0x00\t! replicate4I" %}
+  ins_encode %{
+    __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8I(vecY dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI src));
+  format %{ "movd    $dst,$src\n\t"
+            "pshufd  $dst,$dst,0x00\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
+  ins_encode %{
+    __ movdl($dst$$XMMRegister, $src$$Register);
+    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8I_mem(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI (LoadI mem)));
+  format %{ "pshufd  $dst,$mem,0x00\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
+  ins_encode %{
+    __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4I_imm(vecX dst, immI con) %{
+  predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI con));
+  format %{ "movq    $dst,[$constantaddress]\t! replicate4I($con)\n\t"
+            "punpcklqdq $dst,$dst" %}
+  ins_encode %{
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8I_imm(vecY dst, immI con) %{
+  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI con));
+  format %{ "movq    $dst,[$constantaddress]\t! replicate8I($con)\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst" %}
+  ins_encode %{
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// A long can be loaded into an xmm register directly from memory.
+instruct Repl2L_mem(vecX dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 2 && !VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateL (LoadL mem)));
+  format %{ "movq    $dst,$mem\n\t"
+            "punpcklqdq $dst,$dst\t! replicate2L" %}
+  ins_encode %{
+    __ movq($dst$$XMMRegister, $mem$$Address);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// Replicate long (8 byte) scalar to be vector
+#ifdef _LP64
+instruct Repl4L(vecY dst, rRegL src) %{
+  predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateL src));
+  format %{ "movdq   $dst,$src\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
+  ins_encode %{
+    __ movdq($dst$$XMMRegister, $src$$Register);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+#else // _LP64
+instruct Repl4L(vecY dst, eRegL src, regD tmp) %{
+  predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateL src));
+  effect(TEMP dst, USE src, TEMP tmp);
+  format %{ "movdl   $dst,$src.lo\n\t"
+            "movdl   $tmp,$src.hi\n\t"
+            "punpckldq $dst,$tmp\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
+  ins_encode %{
+    __ movdl($dst$$XMMRegister, $src$$Register);
+    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
+    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+#endif // _LP64
+
+instruct Repl4L_imm(vecY dst, immL con) %{
+  predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateL con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate4L($con)" %}
+  ins_encode %{
+    __ movq($dst$$XMMRegister, $constantaddress($con));
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4L_mem(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateL (LoadL mem)));
+  format %{ "movq    $dst,$mem\n\t"
+            "punpcklqdq $dst,$dst\n\t"
+            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
+  ins_encode %{
+    __ movq($dst$$XMMRegister, $mem$$Address);
+    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
+    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl2F_mem(vecD dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 2 && UseAVX > 0 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateF (LoadF mem)));
+  format %{ "pshufd  $dst,$mem,0x00\t! replicate2F" %}
+  ins_encode %{
+    __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4F_mem(vecX dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateF (LoadF mem)));
+  format %{ "pshufd  $dst,$mem,0x00\t! replicate4F" %}
+  ins_encode %{
+    __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8F(vecY dst, regF src) %{
+  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateF src));
+  format %{ "pshufd  $dst,$src,0x00\n\t"
+            "vinsertf128h $dst,$dst,$dst\t! replicate8F" %}
+  ins_encode %{
+    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
+    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8F_mem(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateF (LoadF mem)));
+  format %{ "pshufd  $dst,$mem,0x00\n\t"
+            "vinsertf128h $dst,$dst,$dst\t! replicate8F" %}
+  ins_encode %{
+    __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
+    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl2D_mem(vecX dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 2 && UseAVX > 0 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateD (LoadD mem)));
+  format %{ "pshufd  $dst,$mem,0x44\t! replicate2D" %}
+  ins_encode %{
+    __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4D(vecY dst, regD src) %{
+  predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateD src));
+  format %{ "pshufd  $dst,$src,0x44\n\t"
+            "vinsertf128h $dst,$dst,$dst\t! replicate4D" %}
+  ins_encode %{
+    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
+    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4D_mem(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateD (LoadD mem)));
+  format %{ "pshufd  $dst,$mem,0x44\n\t"
+            "vinsertf128h $dst,$dst,$dst\t! replicate4D" %}
+  ins_encode %{
+    __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44);
+    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// ====================GENERIC REPLICATE==========================================
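+// These rules carry no AVX-512 predicate and are selected on all hardware
+// for the small vector sizes they cover.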
+
 // Replicate byte scalar to be vector
 instruct Repl4B(vecS dst, rRegI src) %{
   predicate(n->as_Vector()->length() == 4);
@@ -2923,60 +3374,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct Repl16B(vecX dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 16);
-  match(Set dst (ReplicateB src));
-  format %{ "movd    $dst,$src\n\t"
-            "punpcklbw $dst,$dst\n\t"
-            "pshuflw $dst,$dst,0x00\n\t"
-            "punpcklqdq $dst,$dst\t! replicate16B" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
-    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl32B(vecY dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 32);
-  match(Set dst (ReplicateB src));
-  format %{ "movd    $dst,$src\n\t"
-            "punpcklbw $dst,$dst\n\t"
-            "pshuflw $dst,$dst,0x00\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate32B" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
-    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl64B(vecZ dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 64);
-  match(Set dst (ReplicateB src));
-  format %{ "movd    $dst,$src\n\t"
-            "punpcklbw $dst,$dst\n\t"
-            "pshuflw $dst,$dst,0x00\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lower replicate32B\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst\t! upper replicate632B" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
-    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Replicate byte scalar immediate to be vector by loading from const table.
 instruct Repl4B_imm(vecS dst, immI con) %{
   predicate(n->as_Vector()->length() == 4);
@@ -2998,48 +3395,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct Repl16B_imm(vecX dst, immI con) %{
-  predicate(n->as_Vector()->length() == 16);
-  match(Set dst (ReplicateB con));
-  format %{ "movq    $dst,[$constantaddress]\n\t"
-            "punpcklqdq $dst,$dst\t! replicate16B($con)" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl32B_imm(vecY dst, immI con) %{
-  predicate(n->as_Vector()->length() == 32);
-  match(Set dst (ReplicateB con));
-  format %{ "movq    $dst,[$constantaddress]\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lreplicate32B($con)" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl64B_imm(vecZ dst, immI con) %{
-  predicate(n->as_Vector()->length() == 64);
-  match(Set dst (ReplicateB con));
-  format %{ "movq    $dst,[$constantaddress]\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lower replicate32B($con)\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst\t! upper replicate32B($con)" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Replicate byte scalar zero to be vector
 instruct Repl4B_zero(vecS dst, immI0 zero) %{
   predicate(n->as_Vector()->length() == 4);
@@ -3083,18 +3438,6 @@
   ins_pipe( fpu_reg_reg );
 %}
 
-instruct Repl64B_zero(vecZ dst, immI0 zero) %{
-  predicate(n->as_Vector()->length() == 64);
-  match(Set dst (ReplicateB zero));
-  format %{ "vpxor   $dst k0,$dst,$dst\t! replicate64B zero" %}
-  ins_encode %{
-    // Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it).
-    int vector_len = 2;
-    __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
 // Replicate char/short (2 byte) scalar to be vector
 instruct Repl2S(vecS dst, rRegI src) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3108,66 +3451,6 @@
   ins_pipe( fpu_reg_reg );
 %}
 
-instruct Repl4S(vecD dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 4);
-  match(Set dst (ReplicateS src));
-  format %{ "movd    $dst,$src\n\t"
-            "pshuflw $dst,$dst,0x00\t! replicate4S" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
-instruct Repl8S(vecX dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateS src));
-  format %{ "movd    $dst,$src\n\t"
-            "pshuflw $dst,$dst,0x00\n\t"
-            "punpcklqdq $dst,$dst\t! replicate8S" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl16S(vecY dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 16);
-  match(Set dst (ReplicateS src));
-  format %{ "movd    $dst,$src\n\t"
-            "pshuflw $dst,$dst,0x00\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate16S" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl32S(vecZ dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 32);
-  match(Set dst (ReplicateS src));
-  format %{ "movd    $dst,$src\n\t"
-            "pshuflw $dst,$dst,0x00\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lower replicate16S\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst\t! upper replicate16S" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Replicate char/short (2 byte) scalar immediate to be vector by loading from const table.
 instruct Repl2S_imm(vecS dst, immI con) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3189,48 +3472,6 @@
   ins_pipe( fpu_reg_reg );
 %}
 
-instruct Repl8S_imm(vecX dst, immI con) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateS con));
-  format %{ "movq    $dst,[$constantaddress]\n\t"
-            "punpcklqdq $dst,$dst\t! replicate8S($con)" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl16S_imm(vecY dst, immI con) %{
-  predicate(n->as_Vector()->length() == 16);
-  match(Set dst (ReplicateS con));
-  format %{ "movq    $dst,[$constantaddress]\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate16S($con)" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl32S_imm(vecZ dst, immI con) %{
-  predicate(n->as_Vector()->length() == 32);
-  match(Set dst (ReplicateS con));
-  format %{ "movq    $dst,[$constantaddress]\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lower replicate16S($con)\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst\t! upper replicate16S($con)" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Replicate char/short (2 byte) scalar zero to be vector
 instruct Repl2S_zero(vecS dst, immI0 zero) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3274,18 +3515,6 @@
   ins_pipe( fpu_reg_reg );
 %}
 
-instruct Repl32S_zero(vecZ dst, immI0 zero) %{
-  predicate(n->as_Vector()->length() == 32);
-  match(Set dst (ReplicateS zero));
-  format %{ "vpxor   $dst k0,$dst,$dst\t! replicate32S zero" %}
-  ins_encode %{
-    // Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it).
-    int vector_len = 2;
-    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
 // Replicate integer (4 byte) scalar to be vector
 instruct Repl2I(vecD dst, rRegI src) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3299,101 +3528,6 @@
   ins_pipe( fpu_reg_reg );
 %}
 
-instruct Repl4I(vecX dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 4);
-  match(Set dst (ReplicateI src));
-  format %{ "movd    $dst,$src\n\t"
-            "pshufd  $dst,$dst,0x00\t! replicate4I" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl8I(vecY dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateI src));
-  format %{ "movd    $dst,$src\n\t"
-            "pshufd  $dst,$dst,0x00\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl16I(vecZ dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 16);
-  match(Set dst (ReplicateI src));
-  format %{ "movd    $dst,$src\n\t"
-            "pshufd  $dst,$dst,0x00\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lower replicate8I\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst\t! upper replicate8I" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-// Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
-instruct Repl2I_imm(vecD dst, immI con) %{
-  predicate(n->as_Vector()->length() == 2);
-  match(Set dst (ReplicateI con));
-  format %{ "movq    $dst,[$constantaddress]\t! replicate2I($con)" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
-instruct Repl4I_imm(vecX dst, immI con) %{
-  predicate(n->as_Vector()->length() == 4);
-  match(Set dst (ReplicateI con));
-  format %{ "movq    $dst,[$constantaddress]\t! replicate4I($con)\n\t"
-            "punpcklqdq $dst,$dst" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl8I_imm(vecY dst, immI con) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateI con));
-  format %{ "movq    $dst,[$constantaddress]\t! replicate8I($con)\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl16I_imm(vecZ dst, immI con) %{
-  predicate(n->as_Vector()->length() == 16);
-  match(Set dst (ReplicateI con));
-  format %{ "movq    $dst,[$constantaddress]\t! replicate16I($con)\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Integer could be loaded into xmm register directly from memory.
 instruct Repl2I_mem(vecD dst, memory mem) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3407,46 +3541,15 @@
   ins_pipe( fpu_reg_reg );
 %}
 
-instruct Repl4I_mem(vecX dst, memory mem) %{
-  predicate(n->as_Vector()->length() == 4);
-  match(Set dst (ReplicateI (LoadI mem)));
-  format %{ "movd    $dst,$mem\n\t"
-            "pshufd  $dst,$dst,0x00\t! replicate4I" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $mem$$Address);
-    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl8I_mem(vecY dst, memory mem) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateI (LoadI mem)));
-  format %{ "movd    $dst,$mem\n\t"
-            "pshufd  $dst,$dst,0x00\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $mem$$Address);
-    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl16I_mem(vecZ dst, memory mem) %{
-  predicate(n->as_Vector()->length() == 16);
-  match(Set dst (ReplicateI (LoadI mem)));
-  format %{ "movd    $dst,$mem\n\t"
-            "pshufd  $dst,$dst,0x00\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lower replicate8I\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst\t! upper replicate8I" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $mem$$Address);
-    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
+// Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
+instruct Repl2I_imm(vecD dst, immI con) %{
+  predicate(n->as_Vector()->length() == 2);
+  match(Set dst (ReplicateI con));
+  format %{ "movq    $dst,[$constantaddress]\t! replicate2I($con)" %}
+  ins_encode %{
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
+  %}
+  ins_pipe( fpu_reg_reg );
 %}
 
 // Replicate integer (4 byte) scalar zero to be vector
@@ -3482,18 +3585,6 @@
   ins_pipe( fpu_reg_reg );
 %}
 
-instruct Repl16I_zero(vecZ dst, immI0 zero) %{
-  predicate(n->as_Vector()->length() == 16);
-  match(Set dst (ReplicateI zero));
-  format %{ "vpxor   $dst k0,$dst,$dst\t! replicate16I zero" %}
-  ins_encode %{
-    // Use vxorpd since AVX does not have vpxor for 512-bit (AVX2 will have it).
-    int vector_len = 2;
-    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
 // Replicate long (8 byte) scalar to be vector
 #ifdef _LP64
 instruct Repl2L(vecX dst, rRegL src) %{
@@ -3507,36 +3598,6 @@
   %}
   ins_pipe( pipe_slow );
 %}
-
-instruct Repl4L(vecY dst, rRegL src) %{
-  predicate(n->as_Vector()->length() == 4);
-  match(Set dst (ReplicateL src));
-  format %{ "movdq   $dst,$src\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
-  ins_encode %{
-    __ movdq($dst$$XMMRegister, $src$$Register);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl8L(vecZ dst, rRegL src) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateL src));
-  format %{ "movdq   $dst,$src\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lower replicate4L\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst\t! upper replicate4L" %}
-  ins_encode %{
-    __ movdq($dst$$XMMRegister, $src$$Register);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
 #else // _LP64
 instruct Repl2L(vecX dst, eRegL src, regD tmp) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3554,45 +3615,6 @@
   %}
   ins_pipe( pipe_slow );
 %}
-
-instruct Repl4L(vecY dst, eRegL src, regD tmp) %{
-  predicate(n->as_Vector()->length() == 4);
-  match(Set dst (ReplicateL src));
-  effect(TEMP dst, USE src, TEMP tmp);
-  format %{ "movdl   $dst,$src.lo\n\t"
-            "movdl   $tmp,$src.hi\n\t"
-            "punpckldq $dst,$tmp\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
-    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl8L(vecZ dst, eRegL src, regD tmp) %{
-  predicate(n->as_Vector()->length() == 4);
-  match(Set dst (ReplicateL src));
-  effect(TEMP dst, USE src, TEMP tmp);
-  format %{ "movdl   $dst,$src.lo\n\t"
-            "movdl   $tmp,$src.hi\n\t"
-            "punpckldq $dst,$tmp\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lower replicate4L\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst\t! upper replicate4L" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
-    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
 #endif // _LP64
 
 // Replicate long (8 byte) scalar immediate to be vector by loading from const table.
@@ -3608,79 +3630,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct Repl4L_imm(vecY dst, immL con) %{
-  predicate(n->as_Vector()->length() == 4);
-  match(Set dst (ReplicateL con));
-  format %{ "movq    $dst,[$constantaddress]\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate4L($con)" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress($con));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl8L_imm(vecZ dst, immL con) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateL con));
-  format %{ "movq    $dst,[$constantaddress]\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lower replicate4L($con)\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst\t! upper replicate4L($con)" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress($con));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-// Long could be loaded into xmm register directly from memory.
-instruct Repl2L_mem(vecX dst, memory mem) %{
-  predicate(n->as_Vector()->length() == 2);
-  match(Set dst (ReplicateL (LoadL mem)));
-  format %{ "movq    $dst,$mem\n\t"
-            "punpcklqdq $dst,$dst\t! replicate2L" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $mem$$Address);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl4L_mem(vecY dst, memory mem) %{
-  predicate(n->as_Vector()->length() == 4);
-  match(Set dst (ReplicateL (LoadL mem)));
-  format %{ "movq    $dst,$mem\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $mem$$Address);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl8L_mem(vecZ dst, memory mem) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateL (LoadL mem)));
-  format %{ "movq    $dst,$mem\n\t"
-            "punpcklqdq $dst,$dst\n\t"
-            "vinserti128h $dst,$dst,$dst\t! lower replicate4L\n\t"
-            "vinserti64x4h $dst k0,$dst,$dst\t! upper replicate4L" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $mem$$Address);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Replicate long (8 byte) scalar zero to be vector
 instruct Repl2L_zero(vecX dst, immL0 zero) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3704,18 +3653,6 @@
   ins_pipe( fpu_reg_reg );
 %}
 
-instruct Repl8L_zero(vecZ dst, immL0 zero) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateL zero));
-  format %{ "vpxor   $dst k0,$dst,$dst\t! replicate8L zero" %}
-  ins_encode %{
-    // Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it).
-    int vector_len = 2;
-    __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
 // Replicate float (4 byte) scalar to be vector
 instruct Repl2F(vecD dst, regF src) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3737,32 +3674,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct Repl8F(vecY dst, regF src) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateF src));
-  format %{ "pshufd  $dst,$src,0x00\n\t"
-            "vinsertf128h $dst,$dst,$dst\t! replicate8F" %}
-  ins_encode %{
-    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
-    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl16F(vecZ dst, regF src) %{
-  predicate(n->as_Vector()->length() == 16);
-  match(Set dst (ReplicateF src));
-  format %{ "pshufd  $dst,$src,0x00\n\t"
-            "vinsertf128h $dst,$dst,$dst\t! lower replicate8F\n\t"
-            "vinsertf64x4h $dst k0,$dst,$dst\t! lower replicate8F" %}
-  ins_encode %{
-    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
-    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinsertf64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Replicate float (4 byte) scalar zero to be vector
 instruct Repl2F_zero(vecD dst, immF0 zero) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3795,17 +3706,6 @@
   ins_pipe( fpu_reg_reg );
 %}
 
-instruct Repl16F_zero(vecZ dst, immF0 zero) %{
-  predicate(n->as_Vector()->length() == 16);
-  match(Set dst (ReplicateF zero));
-  format %{ "vxorps  $dst k0,$dst,$dst\t! replicate16F zero" %}
-  ins_encode %{
-    int vector_len = 2;
-    __ vxorps($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len);
-  %}
-  ins_pipe( fpu_reg_reg );
-%}
-
 // Replicate double (8 bytes) scalar to be vector
 instruct Repl2D(vecX dst, regD src) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3817,32 +3717,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct Repl4D(vecY dst, regD src) %{
-  predicate(n->as_Vector()->length() == 4);
-  match(Set dst (ReplicateD src));
-  format %{ "pshufd  $dst,$src,0x44\n\t"
-            "vinsertf128h $dst,$dst,$dst\t! replicate4D" %}
-  ins_encode %{
-    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
-    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct Repl8D(vecZ dst, regD src) %{
-  predicate(n->as_Vector()->length() == 8);
-  match(Set dst (ReplicateD src));
-  format %{ "pshufd  $dst,$src,0x44\n\t"
-            "vinsertf128h $dst,$dst,$dst\t! lower replicate4D\n\t"
-            "vinsertf64x4h $dst k0,$dst,$dst\t! upper replicate4D" %}
-  ins_encode %{
-    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
-    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinsertf64x4h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Replicate double (8 byte) scalar zero to be vector
 instruct Repl2D_zero(vecX dst, immD0 zero) %{
   predicate(n->as_Vector()->length() == 2);
@@ -3865,8 +3739,636 @@
   ins_pipe( fpu_reg_reg );
 %}
 
-instruct Repl8D_zero(vecZ dst, immD0 zero) %{
-  predicate(n->as_Vector()->length() == 8);
+// ====================EVEX REPLICATE=============================================
+
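+// Notation used throughout this section: vector_len selects the EVEX operand
+// width (0 = 128-bit, 1 = 256-bit, 2 = 512-bit). supports_avx512vl() gates the
+// EVEX forms on 128/256-bit vectors; supports_avx512vlbw() additionally
+// requires AVX512BW for the byte/word broadcasts; plain UseAVX > 2 covers the
+// full 512-bit case.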
+instruct Repl4B_mem_evex(vecS dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB (LoadB mem)));
+  format %{ "vpbroadcastb  $dst,$mem\t! replicate4B" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8B_mem_evex(vecD dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 8 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB (LoadB mem)));
+  format %{ "vpbroadcastb  $dst,$mem\t! replicate8B" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16B_evex(vecX dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 16 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB src));
+  format %{ "vpbroadcastb $dst,$src\t! replicate16B" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastb($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16B_mem_evex(vecX dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 16 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB (LoadB mem)));
+  format %{ "vpbroadcastb  $dst,$mem\t! replicate16B" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl32B_evex(vecY dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 32 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB src));
+  format %{ "vpbroadcastb $dst,$src\t! replicate32B" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastb($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl32B_mem_evex(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 32 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB (LoadB mem)));
+  format %{ "vpbroadcastb  $dst,$mem\t! replicate32B" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl64B_evex(vecZ dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 64 && UseAVX > 2);
+  match(Set dst (ReplicateB src));
+  format %{ "vpbroadcastb $dst,$src\t! upper replicate64B" %}
+  ins_encode %{
+   int vector_len = 2;
+    __ evpbroadcastb($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl64B_mem_evex(vecZ dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 64 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB (LoadB mem)));
+  format %{ "vpbroadcastb  $dst,$mem\t! replicate64B" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
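+// For the immediate forms, replicate8_imm() repeats the constant to fill a
+// 64-bit pattern (second argument = element width in bytes); that pattern is
+// loaded from the constant table and then broadcast.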
+instruct Repl16B_imm_evex(vecX dst, immI con) %{
+  predicate(n->as_Vector()->length() == 16 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "vpbroadcastb $dst,$dst\t! replicate16B" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
+    __ evpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl32B_imm_evex(vecY dst, immI con) %{
+  predicate(n->as_Vector()->length() == 32 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateB con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "vpbroadcastb $dst,$dst\t! replicate32B" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
+    __ evpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl64B_imm_evex(vecZ dst, immI con) %{
+  predicate(n->as_Vector()->length() == 64 && UseAVX > 2);
+  match(Set dst (ReplicateB con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "vpbroadcastb $dst,$dst\t! upper replicate64B" %}
+  ins_encode %{
+   int vector_len = 2;
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
+    __ evpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl64B_zero_evex(vecZ dst, immI0 zero) %{
+  predicate(n->as_Vector()->length() == 64 && UseAVX > 2);
+  match(Set dst (ReplicateB zero));
+  format %{ "vpxor   $dst k0,$dst,$dst\t! replicate64B zero" %}
+  ins_encode %{
+    // Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it).
+    int vector_len = 2;
+    __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
+instruct Repl4S_evex(vecD dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS src));
+  format %{ "vpbroadcastw $dst,$src\t! replicate4S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4S_mem_evex(vecD dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS (LoadS mem)));
+  format %{ "vpbroadcastw  $dst,$mem\t! replicate4S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8S_evex(vecX dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 8 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS src));
+  format %{ "vpbroadcastw $dst,$src\t! replicate8S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8S_mem_evex(vecX dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 8 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS (LoadS mem)));
+  format %{ "vpbroadcastw  $dst,$mem\t! replicate8S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16S_evex(vecY dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 16 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS src));
+  format %{ "vpbroadcastw $dst,$src\t! replicate16S" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16S_mem_evex(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 16 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS (LoadS mem)));
+  format %{ "vpbroadcastw  $dst,$mem\t! replicate16S" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl32S_evex(vecZ dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 32 && UseAVX > 2);
+  match(Set dst (ReplicateS src));
+  format %{ "vpbroadcastw $dst,$src\t! replicate32S" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl32S_mem_evex(vecZ dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 32 && UseAVX > 2);
+  match(Set dst (ReplicateS (LoadS mem)));
+  format %{ "vpbroadcastw  $dst,$mem\t! replicate32S" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8S_imm_evex(vecX dst, immI con) %{
+  predicate(n->as_Vector()->length() == 8 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "vpbroadcastw $dst,$dst\t! replicate8S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
+    __ evpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16S_imm_evex(vecY dst, immI con) %{
+  predicate(n->as_Vector()->length() == 16 && VM_Version::supports_avx512vlbw());
+  match(Set dst (ReplicateS con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "vpbroadcastw $dst,$dst\t! replicate16S" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
+    __ evpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl32S_imm_evex(vecZ dst, immI con) %{
+  predicate(n->as_Vector()->length() == 32 && UseAVX > 2);
+  match(Set dst (ReplicateS con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "vpbroadcastw $dst,$dst\t! replicate32S" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
+    __ evpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl32S_zero_evex(vecZ dst, immI0 zero) %{
+  predicate(n->as_Vector()->length() == 32 && UseAVX > 2);
+  match(Set dst (ReplicateS zero));
+  format %{ "vpxor   $dst k0,$dst,$dst\t! replicate32S zero" %}
+  ins_encode %{
+    // Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it).
+    int vector_len = 2;
+    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
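+// The dword/qword broadcasts below need only AVX512VL; the byte/word forms
+// above also require AVX512BW, hence the different predicates.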
+instruct Repl4I_evex(vecX dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI src));
+  format %{ "vpbroadcastd  $dst,$src\t! replicate4I" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastd($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4I_mem_evex(vecX dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI (LoadI mem)));
+  format %{ "vpbroadcastd  $dst,$mem\t! replicate4I" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8I_evex(vecY dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI src));
+  format %{ "vpbroadcastd  $dst,$src\t! replicate8I" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastd($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8I_mem_evex(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI (LoadI mem)));
+  format %{ "vpbroadcastd  $dst,$mem\t! replicate8I" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16I_evex(vecZ dst, rRegI src) %{
+  predicate(n->as_Vector()->length() == 16 && UseAVX > 2);
+  match(Set dst (ReplicateI src));
+  format %{ "vpbroadcastd  $dst,$src\t! replicate16I" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastd($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16I_mem_evex(vecZ dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 16 && UseAVX > 2);
+  match(Set dst (ReplicateI (LoadI mem)));
+  format %{ "vpbroadcastd  $dst,$mem\t! replicate16I" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4I_imm_evex(vecX dst, immI con) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI con));
+  format %{ "movq    $dst,[$constantaddress]\t! replicate8I($con)\n\t"
+            "vpbroadcastd  $dst,$dst\t! replicate4I" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
+    __ evpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8I_imm_evex(vecY dst, immI con) %{
+  predicate(n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateI con));
+  format %{ "movq    $dst,[$constantaddress]\t! replicate8I($con)\n\t"
+            "vpbroadcastd  $dst,$dst\t! replicate8I" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
+    __ evpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16I_imm_evex(vecZ dst, immI con) %{
+  predicate(n->as_Vector()->length() == 16 && UseAVX > 2);
+  match(Set dst (ReplicateI con));
+  format %{ "movq    $dst,[$constantaddress]\t! replicate16I($con)\n\t"
+            "vpbroadcastd  $dst,$dst\t! replicate16I" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
+    __ evpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16I_zero_evex(vecZ dst, immI0 zero) %{
+  predicate(n->as_Vector()->length() == 16 && UseAVX > 2);
+  match(Set dst (ReplicateI zero));
+  format %{ "vpxor   $dst k0,$dst,$dst\t! replicate16I zero" %}
+  ins_encode %{
+    // Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it).
+    int vector_len = 2;
+    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
+// Replicate long (8 byte) scalar to be vector
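+// On 64-bit the long lives in a single GPR and is broadcast directly; on
+// 32-bit it arrives as a lo/hi register pair and is first assembled into an
+// XMM register with movdl/punpckldq.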
+#ifdef _LP64
+instruct Repl4L_evex(vecY dst, rRegL src) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateL src));
+  format %{ "vpbroadcastq  $dst,$src\t! replicate4L" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastq($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8L_evex(vecZ dst, rRegL src) %{
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
+  match(Set dst (ReplicateL src));
+  format %{ "vpbroadcastq  $dst,$src\t! replicate8L" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastq($dst$$XMMRegister, $src$$Register, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+#else // _LP64
+instruct Repl4L_evex(vecY dst, eRegL src, regD tmp) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateL src));
+  effect(TEMP dst, USE src, TEMP tmp);
+  format %{ "movdl   $dst,$src.lo\n\t"
+            "movdl   $tmp,$src.hi\n\t"
+            "punpckldq $dst,$tmp\n\t"
+            "vpbroadcastq  $dst,$dst\t! replicate4L" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ movdl($dst$$XMMRegister, $src$$Register);
+    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
+    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
+    __ evpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8L_evex(vecZ dst, eRegL src, regD tmp) %{
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
+  match(Set dst (ReplicateL src));
+  effect(TEMP dst, USE src, TEMP tmp);
+  format %{ "movdl   $dst,$src.lo\n\t"
+            "movdl   $tmp,$src.hi\n\t"
+            "punpckldq $dst,$tmp\n\t"
+            "vpbroadcastq  $dst,$dst\t! replicate8L" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ movdl($dst$$XMMRegister, $src$$Register);
+    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
+    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
+    __ evpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+#endif // _LP64
+
+instruct Repl4L_imm_evex(vecY dst, immL con) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateL con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "vpbroadcastq  $dst,$dst\t! replicate4L" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ movq($dst$$XMMRegister, $constantaddress($con));
+    __ evpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8L_imm_evex(vecZ dst, immL con) %{
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
+  match(Set dst (ReplicateL con));
+  format %{ "movq    $dst,[$constantaddress]\n\t"
+            "vpbroadcastq  $dst,$dst\t! replicate8L" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ movq($dst$$XMMRegister, $constantaddress($con));
+    __ evpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl2L_mem_evex(vecX dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 2 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateL (LoadL mem)));
+  format %{ "vpbroadcastd  $dst,$mem\t! replicate2L" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ evpbroadcastq($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4L_mem_evex(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateL (LoadL mem)));
+  format %{ "vpbroadcastd  $dst,$mem\t! replicate4L" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastq($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8L_mem_evex(vecZ dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
+  match(Set dst (ReplicateL (LoadL mem)));
+  format %{ "vpbroadcastd  $dst,$mem\t! replicate8L" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastq($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8L_zero_evex(vecZ dst, immL0 zero) %{
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
+  match(Set dst (ReplicateL zero));
+  format %{ "vpxor   $dst k0,$dst,$dst\t! replicate8L zero" %}
+  ins_encode %{
+    // Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it).
+    int vector_len = 2;
+    __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
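+// Float/double replicates broadcast the low element of the XMM source with
+// vbroadcastss/vbroadcastsd.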
+instruct Repl8F_evex(vecY dst, regF src) %{
+  predicate(n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateF src));
+  format %{ "vbroadcastss $dst,$src\t! replicate8F" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastss($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8F_mem_evex(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateF (LoadF mem)));
+  format %{ "vbroadcastss  $dst,$mem\t! replicate8F" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastss($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16F_evex(vecZ dst, regF src) %{
+  predicate(n->as_Vector()->length() == 16 && UseAVX > 2);
+  match(Set dst (ReplicateF src));
+  format %{ "vbroadcastss $dst,$src\t! replicate16F" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastss($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16F_mem_evex(vecZ dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 16 && UseAVX > 2);
+  match(Set dst (ReplicateF (LoadF mem)));
+  format %{ "vbroadcastss  $dst,$mem\t! replicate16F" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastss($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl16F_zero_evex(vecZ dst, immF0 zero) %{
+  predicate(n->as_Vector()->length() == 16 && UseAVX > 2);
+  match(Set dst (ReplicateF zero));
+  format %{ "vxorps  $dst k0,$dst,$dst\t! replicate16F zero" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ vxorps($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len);
+  %}
+  ins_pipe( fpu_reg_reg );
+%}
+
+instruct Repl4D_evex(vecY dst, regD src) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateD src));
+  format %{ "vbroadcastsd $dst,$src\t! replicate4D" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastsd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl4D_mem_evex(vecY dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 4 && VM_Version::supports_avx512vl());
+  match(Set dst (ReplicateD (LoadD mem)));
+  format %{ "vbroadcastsd  $dst,$mem\t! replicate4D" %}
+  ins_encode %{
+    int vector_len = 1;
+    __ evpbroadcastsd($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8D_evex(vecZ dst, regD src) %{
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
+  match(Set dst (ReplicateD src));
+  format %{ "vbroadcastsd $dst,$src\t! replicate8D" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastsd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8D_mem_evex(vecZ dst, memory mem) %{
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
+  match(Set dst (ReplicateD (LoadD mem)));
+  format %{ "vbroadcastsd  $dst,$mem\t! replicate8D" %}
+  ins_encode %{
+    int vector_len = 2;
+    __ evpbroadcastsd($dst$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct Repl8D_zero_evex(vecZ dst, immD0 zero) %{
+  predicate(n->as_Vector()->length() == 8 && UseAVX > 2);
   match(Set dst (ReplicateD zero));
   format %{ "vxorpd  $dst k0,$dst,$dst,vect512\t! replicate8D zero" %}
   ins_encode %{
@@ -4972,6 +5474,17 @@
   ins_pipe( pipe_slow );
 %}
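+// The _mem variants below fold the vector load into the AVX arithmetic
+// instruction itself (dst = src op [mem]), saving a separate load. The same
+// shape recurs for the sub/mul/div and logical variants later in this file.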
 
+instruct vadd4B_mem(vecS dst, vecS src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+  match(Set dst (AddVB src (LoadVector mem)));
+  format %{ "vpaddb  $dst,$src,$mem\t! add packed4B" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vadd8B(vecD dst, vecD src) %{
   predicate(n->as_Vector()->length() == 8);
   match(Set dst (AddVB dst src));
@@ -4993,6 +5506,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vadd8B_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
+  match(Set dst (AddVB src (LoadVector mem)));
+  format %{ "vpaddb  $dst,$src,$mem\t! add packed8B" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vadd16B(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 16);
   match(Set dst (AddVB dst src));
@@ -5091,6 +5615,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vadd2S_mem(vecS dst, vecS src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (AddVS src (LoadVector mem)));
+  format %{ "vpaddw  $dst,$src,$mem\t! add packed2S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vadd4S(vecD dst, vecD src) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (AddVS dst src));
@@ -5112,6 +5647,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vadd4S_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+  match(Set dst (AddVS src (LoadVector mem)));
+  format %{ "vpaddw  $dst,$src,$mem\t! add packed4S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vadd8S(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 8);
   match(Set dst (AddVS dst src));
@@ -5210,6 +5756,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vadd2I_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (AddVI src (LoadVector mem)));
+  format %{ "vpaddd  $dst,$src,$mem\t! add packed2I" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vadd4I(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (AddVI dst src));
@@ -5385,6 +5942,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vadd2F_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (AddVF src (LoadVector mem)));
+  format %{ "vaddps  $dst,$src,$mem\t! add packed2F" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vadd4F(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (AddVF dst src));
@@ -5562,6 +6130,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vsub4B_mem(vecS dst, vecS src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+  match(Set dst (SubVB src (LoadVector mem)));
+  format %{ "vpsubb  $dst,$src,$mem\t! sub packed4B" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vsub8B(vecD dst, vecD src) %{
   predicate(n->as_Vector()->length() == 8);
   match(Set dst (SubVB dst src));
@@ -5583,6 +6162,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vsub8B_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
+  match(Set dst (SubVB src (LoadVector mem)));
+  format %{ "vpsubb  $dst,$src,$mem\t! sub packed8B" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vsub16B(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 16);
   match(Set dst (SubVB dst src));
@@ -5681,6 +6271,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vsub2S_mem(vecS dst, vecS src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (SubVS src (LoadVector mem)));
+  format %{ "vpsubw  $dst,$src,$mem\t! sub packed2S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vsub4S(vecD dst, vecD src) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (SubVS dst src));
@@ -5702,6 +6303,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vsub4S_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+  match(Set dst (SubVS src (LoadVector mem)));
+  format %{ "vpsubw  $dst,$src,$mem\t! sub packed4S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vsub8S(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 8);
   match(Set dst (SubVS dst src));
@@ -5800,6 +6412,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vsub2I_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (SubVI src (LoadVector mem)));
+  format %{ "vpsubd  $dst,$src,$mem\t! sub packed2I" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vsub4I(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (SubVI dst src));
@@ -5975,6 +6598,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vsub2F_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (SubVF src (LoadVector mem)));
+  format %{ "vsubps  $dst,$src,$mem\t! sub packed2F" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vsub4F(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (SubVF dst src));
@@ -6152,6 +6786,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vmul2S_mem(vecS dst, vecS src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (MulVS src (LoadVector mem)));
+  format %{ "vpmullw $dst,$src,$mem\t! mul packed2S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vmul4S(vecD dst, vecD src) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (MulVS dst src));
@@ -6173,6 +6818,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vmul4S_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
+  match(Set dst (MulVS src (LoadVector mem)));
+  format %{ "vpmullw $dst,$src,$mem\t! mul packed4S" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vmul8S(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 8);
   match(Set dst (MulVS dst src));
@@ -6271,13 +6927,13 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct vmul2L_reg(vecX dst, vecX src1, vecX src2) %{
-  predicate(UseAVX > 2 && n->as_Vector()->length() == 2 && VM_Version::supports_avx512dq());
-  match(Set dst (MulVL src1 src2));
-  format %{ "vpmullq $dst,$src1,$src2\t! mul packed2L" %}
-  ins_encode %{
-    int vector_len = 0;
-    __ vpmullq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
+instruct vmul2I_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (MulVI src (LoadVector mem)));
+  format %{ "vpmulld $dst,$src,$mem\t! mul packed2I" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -6314,6 +6970,28 @@
   ins_pipe( pipe_slow );
 %}
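+// vpmullq (packed 64-bit multiply) is an AVX-512DQ instruction, hence the
+// supports_avx512dq() checks on all MulVL patterns.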
 
+instruct vmul2L_reg(vecX dst, vecX src1, vecX src2) %{
+  predicate(UseAVX > 2 && n->as_Vector()->length() == 2 && VM_Version::supports_avx512dq());
+  match(Set dst (MulVL src1 src2));
+  format %{ "vpmullq $dst,$src1,$src2\t! mul packed2L" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpmullq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vmul2L_mem(vecX dst, vecX src, memory mem) %{
+  predicate(UseAVX > 2 && n->as_Vector()->length() == 2 && VM_Version::supports_avx512dq());
+  match(Set dst (MulVL src (LoadVector mem)));
+  format %{ "vpmullq $dst,$src,$mem\t! mul packed2L" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpmullq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vmul4L_reg(vecY dst, vecY src1, vecY src2) %{
   predicate(UseAVX > 2 && n->as_Vector()->length() == 4 && VM_Version::supports_avx512dq());
   match(Set dst (MulVL src1 src2));
@@ -6336,17 +7014,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct vmul8I_reg(vecY dst, vecY src1, vecY src2) %{
-  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
-  match(Set dst (MulVI src1 src2));
-  format %{ "vpmulld $dst,$src1,$src2\t! mul packed8I" %}
-  ins_encode %{
-    int vector_len = 1;
-    __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 instruct vmul8L_reg(vecZ dst, vecZ src1, vecZ src2) %{
   predicate(UseAVX > 2 && n->as_Vector()->length() == 8 && VM_Version::supports_avx512dq());
   match(Set dst (MulVL src1 src2));
@@ -6358,12 +7025,23 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct vmul16I_reg(vecZ dst, vecZ src1, vecZ src2) %{
-  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
-  match(Set dst (MulVI src1 src2));
-  format %{ "vpmulld $dst,$src1,$src2\t! mul packed16I" %}
+instruct vmul8L_mem(vecZ dst, vecZ src, memory mem) %{
+  predicate(UseAVX > 2 && n->as_Vector()->length() == 8 && VM_Version::supports_avx512dq());
+  match(Set dst (MulVL src (LoadVector mem)));
+  format %{ "vpmullq $dst,$src,$mem\t! mul packed8L" %}
   ins_encode %{
     int vector_len = 2;
+    __ vpmullq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct vmul8I_reg(vecY dst, vecY src1, vecY src2) %{
+  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
+  match(Set dst (MulVI src1 src2));
+  format %{ "vpmulld $dst,$src1,$src2\t! mul packed8I" %}
+  ins_encode %{
+    int vector_len = 1;
     __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
   %}
   ins_pipe( pipe_slow );
@@ -6380,13 +7058,13 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct vmul8L_mem(vecZ dst, vecZ src, memory mem) %{
-  predicate(UseAVX > 2 && n->as_Vector()->length() == 8 && VM_Version::supports_avx512dq());
-  match(Set dst (MulVL src (LoadVector mem)));
-  format %{ "vpmullq $dst,$src,$mem\t! mul packed8L" %}
+instruct vmul16I_reg(vecZ dst, vecZ src1, vecZ src2) %{
+  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
+  match(Set dst (MulVI src1 src2));
+  format %{ "vpmulld $dst,$src1,$src2\t! mul packed16I" %}
   ins_encode %{
     int vector_len = 2;
-    __ vpmullq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+    __ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
   %}
   ins_pipe( pipe_slow );
 %}
@@ -6424,6 +7102,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vmul2F_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (MulVF src (LoadVector mem)));
+  format %{ "vmulps  $dst,$src,$mem\t! mul packed2F" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vmul4F(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (MulVF dst src));
@@ -6601,6 +7290,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vdiv2F_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
+  match(Set dst (DivVF src (LoadVector mem)));
+  format %{ "vdivps  $dst,$src,$mem\t! div packed2F" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vdiv4F(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length() == 4);
   match(Set dst (DivVF dst src));
@@ -7878,6 +8578,17 @@
   ins_pipe( pipe_slow );
 %}
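+// Bitwise AND/OR/XOR are element-size agnostic, so these patterns match on
+// length_in_bytes() rather than element count.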
 
+instruct vand4B_mem(vecS dst, vecS src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
+  match(Set dst (AndV src (LoadVector mem)));
+  format %{ "vpand   $dst,$src,$mem\t! and vectors (4 bytes)" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vand8B(vecD dst, vecD src) %{
   predicate(n->as_Vector()->length_in_bytes() == 8);
   match(Set dst (AndV dst src));
@@ -7899,6 +8610,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vand8B_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
+  match(Set dst (AndV src (LoadVector mem)));
+  format %{ "vpand   $dst,$src,$mem\t! and vectors (8 bytes)" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vand16B(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length_in_bytes() == 16);
   match(Set dst (AndV dst src));
@@ -7998,6 +8720,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vor4B_mem(vecS dst, vecS src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
+  match(Set dst (OrV src (LoadVector mem)));
+  format %{ "vpor    $dst,$src,$mem\t! or vectors (4 bytes)" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vor8B(vecD dst, vecD src) %{
   predicate(n->as_Vector()->length_in_bytes() == 8);
   match(Set dst (OrV dst src));
@@ -8019,6 +8752,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vor8B_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
+  match(Set dst (OrV src (LoadVector mem)));
+  format %{ "vpor    $dst,$src,$mem\t! or vectors (8 bytes)" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vor16B(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length_in_bytes() == 16);
   match(Set dst (OrV dst src));
@@ -8118,6 +8862,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vxor4B_mem(vecS dst, vecS src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
+  match(Set dst (XorV src (LoadVector mem)));
+  format %{ "vpxor   $dst,$src,$mem\t! xor vectors (4 bytes)" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vxor8B(vecD dst, vecD src) %{
   predicate(n->as_Vector()->length_in_bytes() == 8);
   match(Set dst (XorV dst src));
@@ -8139,6 +8894,17 @@
   ins_pipe( pipe_slow );
 %}
 
+instruct vxor8B_mem(vecD dst, vecD src, memory mem) %{
+  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
+  match(Set dst (XorV src (LoadVector mem)));
+  format %{ "vpxor   $dst,$src,$mem\t! xor vectors (8 bytes)" %}
+  ins_encode %{
+    int vector_len = 0;
+    __ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 instruct vxor16B(vecX dst, vecX src) %{
   predicate(n->as_Vector()->length_in_bytes() == 16);
   match(Set dst (XorV dst src));
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -363,6 +363,11 @@
 #endif
   if (av & AV_SPARC_CBCOND)       features |= cbcond_instructions_m;
 
+#ifndef AV_SPARC_CRC32C
+#define AV_SPARC_CRC32C 0x20000000  /* crc32c instruction supported */
+#endif
+  if (av & AV_SPARC_CRC32C)       features |= crc32c_instruction_m;
+
 #ifndef AV_SPARC_AES
 #define AV_SPARC_AES 0x00020000  /* aes instrs supported */
 #endif
--- a/hotspot/src/share/vm/ci/ciField.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/ci/ciField.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -186,6 +186,10 @@
   // Even if general trusting is disabled, trust system-built closures in these packages.
   if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke"))
     return true;
+  // Trust VM anonymous classes. They are private API (sun.misc.Unsafe) and can't be serialized,
+  // so there is no hacking of finals going on with them.
+  if (holder->is_anonymous())
+    return true;
   return TrustFinalNonStaticFields;
 }
 
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -58,6 +58,7 @@
   _nonstatic_field_size = ik->nonstatic_field_size();
   _has_nonstatic_fields = ik->has_nonstatic_fields();
   _has_default_methods = ik->has_default_methods();
+  _is_anonymous = ik->is_anonymous();
   _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
   _has_injected_fields = -1;
   _implementor = NULL; // we will fill these lazily
@@ -101,6 +102,7 @@
   _has_nonstatic_fields = false;
   _nonstatic_fields = NULL;
   _has_injected_fields = -1;
+  _is_anonymous = false;
   _loader = loader;
   _protection_domain = protection_domain;
   _is_shared = false;
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -53,6 +53,7 @@
   bool                   _has_subklass;
   bool                   _has_nonstatic_fields;
   bool                   _has_default_methods;
+  bool                   _is_anonymous;
 
   ciFlags                _flags;
   jint                   _nonstatic_field_size;
@@ -179,6 +180,10 @@
     return _has_default_methods;
   }
 
+  bool is_anonymous() {
+    return _is_anonymous;
+  }
+
   ciInstanceKlass* get_canonical_holder(int offset);
   ciField* get_field_by_offset(int field_offset, bool is_static);
   ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -846,6 +846,12 @@
    do_name(     implCompressMB_name,                               "implCompressMultiBlock")                            \
    do_signature(implCompressMB_signature,                          "([BII)I")                                           \
                                                                                                                         \
+  /* support for com.sun.crypto.provider.GHASH */                                                                       \
+  do_class(com_sun_crypto_provider_ghash, "com/sun/crypto/provider/GHASH")                                              \
+  do_intrinsic(_ghash_processBlocks, com_sun_crypto_provider_ghash, processBlocks_name, ghash_processBlocks_signature, F_S) \
+   do_name(processBlocks_name, "processBlocks")                                                                         \
+   do_signature(ghash_processBlocks_signature, "([BII[J[J)V")                                                           \
+                                                                                                                        \
   /* support for java.util.zip */                                                                                       \
   do_class(java_util_zip_CRC32,           "java/util/zip/CRC32")                                                        \
   do_intrinsic(_updateCRC32,               java_util_zip_CRC32,   update_name, int2_int_signature,               F_SN)  \
@@ -857,6 +863,12 @@
    do_name(     updateByteBuffer_name,                           "updateByteBuffer")                                    \
    do_signature(updateByteBuffer_signature,                      "(IJII)I")                                             \
                                                                                                                         \
+  /* support for java.util.zip.CRC32C */                                                                                \
+  do_class(java_util_zip_CRC32C,          "java/util/zip/CRC32C")                                                       \
+  do_intrinsic(_updateBytesCRC32C,         java_util_zip_CRC32C,  updateBytes_name, updateBytes_signature,       F_S)   \
+  do_intrinsic(_updateDirectByteBufferCRC32C, java_util_zip_CRC32C, updateDirectByteBuffer_name, updateByteBuffer_signature, F_S) \
+   do_name(     updateDirectByteBuffer_name,                     "updateDirectByteBuffer")                              \
+                                                                                                                        \
   /* support for sun.misc.Unsafe */                                                                                     \
   do_class(sun_misc_Unsafe,               "sun/misc/Unsafe")                                                            \
                                                                                                                         \
--- a/hotspot/src/share/vm/code/debugInfo.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/code/debugInfo.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "code/debugInfo.hpp"
 #include "code/debugInfoRec.hpp"
 #include "code/nmethod.hpp"
+#include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@@ -47,6 +48,12 @@
   write_int(recorder()->oop_recorder()->find_index(h));
 }
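+// read_oop moves out of line: its assert needs oops/oop.inline.hpp, which the
+// .cpp can include without burdening every includer of debugInfo.hpp.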
 
+oop DebugInfoReadStream::read_oop() {
+  oop o = code()->oop_at(read_int());
+  assert(o->is_oop_or_null(), "oop only");
+  return o;
+}
+
 ScopeValue* DebugInfoReadStream::read_object_value() {
   int id = read_int();
 #ifdef ASSERT
--- a/hotspot/src/share/vm/code/debugInfo.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/code/debugInfo.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -266,11 +266,7 @@
 
   } ;
 
-  oop read_oop() {
-    oop o = code()->oop_at(read_int());
-    assert(o == NULL || o->is_oop(), "oop only");
-    return o;
-  }
+  oop read_oop();
   Method* read_method() {
     Method* o = (Method*)(code()->metadata_at(read_int()));
     // is_metadata() is a faster check than is_metaspace_object()
--- a/hotspot/src/share/vm/compiler/compileLog.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/compiler/compileLog.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,13 +58,15 @@
 CompileLog::~CompileLog() {
   delete _out; // Close fd in fileStream::~fileStream()
   _out = NULL;
+  // Remove partial file after merging in CompileLog::finish_log_on_error
+  unlink(_file);
   FREE_C_HEAP_ARRAY(char, _identities);
   FREE_C_HEAP_ARRAY(char, _file);
 }
 
 
 // see_tag, pop_tag:  Override the default do-nothing methods on xmlStream.
-// These methods provide a hook for managing the the extra context markup.
+// These methods provide a hook for managing the extra context markup.
 void CompileLog::see_tag(const char* tag, bool push) {
   if (_context.size() > 0 && _out != NULL) {
     _out->write(_context.base(), _context.size());
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -193,6 +193,13 @@
   product(intx,  LoopMaxUnroll, 16,                                         \
           "Maximum number of unrolls for main loop")                        \
                                                                             \
+  product(bool,  SuperWordLoopUnrollAnalysis, false,                        \
+          "Map number of unrolls for main loop via "                        \
+          "Superword Level Parallelism analysis")                           \
+                                                                            \
+  notproduct(bool, TraceSuperWordLoopUnrollAnalysis, false,                 \
+          "Trace what Superword Level Parallelism analysis applies")        \
+                                                                            \
   product(intx,  LoopUnrollMin, 4,                                          \
           "Minimum number of unroll loop bodies before checking progress"   \
           "of rounds of unroll,optimize,..")                                \
--- a/hotspot/src/share/vm/opto/escape.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/opto/escape.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -962,10 +962,12 @@
                  (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
--- a/hotspot/src/share/vm/opto/library_call.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -197,7 +197,7 @@
   CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
     return generate_method_call(method_id, true, false);
   }
-  Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static);
+  Node * load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact, bool is_static, ciInstanceKlass * fromKls);
 
   Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2);
   Node* make_string_method_node(int opcode, Node* str1, Node* str2);
@@ -278,6 +278,7 @@
   Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
   Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
   Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
+  bool inline_ghash_processBlocks();
   bool inline_sha_implCompress(vmIntrinsics::ID id);
   bool inline_digestBase_implCompressMB(int predicate);
   bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
@@ -290,6 +291,9 @@
   bool inline_updateCRC32();
   bool inline_updateBytesCRC32();
   bool inline_updateByteBufferCRC32();
+  Node* get_table_from_crc32c_class(ciInstanceKlass *crc32c_class);
+  bool inline_updateBytesCRC32C();
+  bool inline_updateDirectByteBufferCRC32C();
   bool inline_multiplyToLen();
   bool inline_squareToLen();
   bool inline_mulAdd();
@@ -528,12 +532,21 @@
     predicates = 3;
     break;
 
+  case vmIntrinsics::_ghash_processBlocks:
+    if (!UseGHASHIntrinsics) return NULL;
+    break;
+
   case vmIntrinsics::_updateCRC32:
   case vmIntrinsics::_updateBytesCRC32:
   case vmIntrinsics::_updateByteBufferCRC32:
     if (!UseCRC32Intrinsics) return NULL;
     break;
 
+  case vmIntrinsics::_updateBytesCRC32C:
+  case vmIntrinsics::_updateDirectByteBufferCRC32C:
+    if (!UseCRC32CIntrinsics) return NULL;
+    break;
+
   case vmIntrinsics::_incrementExactI:
   case vmIntrinsics::_addExactI:
     if (!Matcher::match_rule_supported(Op_OverflowAddI) || !UseMathExactIntrinsics) return NULL;
@@ -929,6 +942,9 @@
   case vmIntrinsics::_mulAdd:
     return inline_mulAdd();
 
+  case vmIntrinsics::_ghash_processBlocks:
+    return inline_ghash_processBlocks();
+
   case vmIntrinsics::_encodeISOArray:
     return inline_encodeISOArray();
 
@@ -939,6 +955,11 @@
   case vmIntrinsics::_updateByteBufferCRC32:
     return inline_updateByteBufferCRC32();
 
+  case vmIntrinsics::_updateBytesCRC32C:
+    return inline_updateBytesCRC32C();
+  case vmIntrinsics::_updateDirectByteBufferCRC32C:
+    return inline_updateDirectByteBufferCRC32C();
+
   case vmIntrinsics::_profileBoolean:
     return inline_profileBoolean();
   case vmIntrinsics::_isCompileConstant:
@@ -5528,6 +5549,106 @@
   return true;
 }
 
+//------------------------------get_table_from_crc32c_class-----------------------
+Node * LibraryCallKit::get_table_from_crc32c_class(ciInstanceKlass *crc32c_class) {
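+  // Passing the klass instead of a receiver oop lets load_field_from_object
+  // resolve a static field without an instance (see its new fromKls parameter).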
+  Node* table = load_field_from_object(NULL, "byteTable", "[I", /*is_exact*/ false, /*is_static*/ true, crc32c_class);
+  assert (table != NULL, "wrong version of java.util.zip.CRC32C");
+
+  return table;
+}
+
+//------------------------------inline_updateBytesCRC32C-----------------------
+//
+// Calculate CRC32C for byte[] array.
+// int java.util.zip.CRC32C.updateBytes(int crc, byte[] buf, int off, int end)
+//
+bool LibraryCallKit::inline_updateBytesCRC32C() {
+  assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
+  assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
+  assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
+  // no receiver since it is a static method
+  Node* crc     = argument(0); // type: int
+  Node* src     = argument(1); // type: oop
+  Node* offset  = argument(2); // type: int
+  Node* end     = argument(3); // type: int
+
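+  // The Java method passes an exclusive end index rather than a length,
+  // hence the subtraction.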
+  Node* length = _gvn.transform(new SubINode(end, offset));
+
+  const Type* src_type = src->Value(&_gvn);
+  const TypeAryPtr* top_src = src_type->isa_aryptr();
+  if (top_src  == NULL || top_src->klass()  == NULL) {
+    // failed array check
+    return false;
+  }
+
+  // Figure out the size and type of the elements we will be copying.
+  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
+  if (src_elem != T_BYTE) {
+    return false;
+  }
+
+  // 'src_start' points to src array + scaled offset
+  Node* src_start = array_element_address(src, offset, src_elem);
+
+  // static final int[] byteTable in class CRC32C
+  Node* table = get_table_from_crc32c_class(callee()->holder());
+  Node* table_start = array_element_address(table, intcon(0), T_INT);
+
+  // We assume that range check is done by caller.
+  // TODO: generate range check (offset+length <= src.length) in debug VM.
+
+  // Call the stub.
+  address stubAddr = StubRoutines::updateBytesCRC32C();
+  const char *stubName = "updateBytesCRC32C";
+
+  Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
+                                 stubAddr, stubName, TypePtr::BOTTOM,
+                                 crc, src_start, length, table_start);
+  Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
+  set_result(result);
+  return true;
+}
+
+//------------------------------inline_updateDirectByteBufferCRC32C-----------------------
+//
+// Calculate CRC32C for DirectByteBuffer.
+// int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
+//
+bool LibraryCallKit::inline_updateDirectByteBufferCRC32C() {
+  assert(UseCRC32CIntrinsics, "need CRC32C instruction support");
+  assert(callee()->signature()->size() == 5, "updateDirectByteBuffer has 4 parameters, one of which is a two-slot long");
+  assert(callee()->holder()->is_loaded(), "CRC32C class must be loaded");
+  // no receiver since it is a static method
+  Node* crc     = argument(0); // type: int
+  Node* src     = argument(1); // type: long
+  Node* offset  = argument(3); // type: int
+  Node* end     = argument(4); // type: int
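+  // Note: argument(2) is skipped -- it is the second slot of the two-slot long 'buf'.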
+
+  Node* length = _gvn.transform(new SubINode(end, offset));
+
+  src = ConvL2X(src);  // adjust Java long to machine word
+  Node* base = _gvn.transform(new CastX2PNode(src));
+  offset = ConvI2X(offset);
+
+  // 'src_start' points to the buffer's base address plus offset
+  Node* src_start = basic_plus_adr(top(), base, offset);
+
+  // static final int[] byteTable in class CRC32C
+  Node* table = get_table_from_crc32c_class(callee()->holder());
+  Node* table_start = array_element_address(table, intcon(0), T_INT);
+
+  // Call the stub.
+  address stubAddr = StubRoutines::updateBytesCRC32C();
+  const char *stubName = "updateBytesCRC32C";
+
+  Node* call = make_runtime_call(RC_LEAF, OptoRuntime::updateBytesCRC32C_Type(),
+                                 stubAddr, stubName, TypePtr::BOTTOM,
+                                 crc, src_start, length, table_start);
+  Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
+  set_result(result);
+  return true;
+}
+
 //----------------------------inline_reference_get----------------------------
 // public T java.lang.ref.Reference.get();
 bool LibraryCallKit::inline_reference_get() {
@@ -5563,18 +5684,28 @@
 
 
 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
-                                              bool is_exact=true, bool is_static=false) {
-
-  const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
-  assert(tinst != NULL, "obj is null");
-  assert(tinst->klass()->is_loaded(), "obj is not loaded");
-  assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
-
-  ciField* field = tinst->klass()->as_instance_klass()->get_field_by_name(ciSymbol::make(fieldName),
-                                                                          ciSymbol::make(fieldTypeString),
-                                                                          is_static);
+                                              bool is_exact=true, bool is_static=false,
+                                              ciInstanceKlass * fromKls=NULL) {
+  if (fromKls == NULL) {
+    const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
+    assert(tinst != NULL, "obj is null");
+    assert(tinst->klass()->is_loaded(), "obj is not loaded");
+    assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
+    fromKls = tinst->klass()->as_instance_klass();
+  } else {
+    assert(is_static, "only for static field access");
+  }
+  ciField* field = fromKls->get_field_by_name(ciSymbol::make(fieldName),
+                                              ciSymbol::make(fieldTypeString),
+                                              is_static);
+
+  assert (field != NULL, "undefined field");
   if (field == NULL) return (Node *) NULL;
-  assert (field != NULL, "undefined field");
+
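+  // For a static field, the base object is the holder klass's java mirror
+  // (its java.lang.Class instance), materialized below as a constant oop.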
+  if (is_static) {
+    const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror());
+    fromObj = makecon(tip);
+  }
 
   // Next code copied from Parse::do_get_xxx():
 
@@ -5858,6 +5989,35 @@
   return _gvn.transform(region);
 }
 
+//------------------------------inline_ghash_processBlocks
+bool LibraryCallKit::inline_ghash_processBlocks() {
+  address stubAddr;
+  const char *stubName;
+  assert(UseGHASHIntrinsics, "need GHASH intrinsics support");
+
+  stubAddr = StubRoutines::ghash_processBlocks();
+  stubName = "ghash_processBlocks";
+
+  Node* data           = argument(0);
+  Node* offset         = argument(1);
+  Node* len            = argument(2);
+  Node* state          = argument(3);
+  Node* subkeyH        = argument(4);
+
+  Node* state_start  = array_element_address(state, intcon(0), T_LONG);
+  assert(state_start, "state is NULL");
+  Node* subkeyH_start  = array_element_address(subkeyH, intcon(0), T_LONG);
+  assert(subkeyH_start, "subkeyH is NULL");
+  Node* data_start  = array_element_address(data, offset, T_BYTE);
+  assert(data_start, "data is NULL");
+
+  Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP,
+                                  OptoRuntime::ghash_processBlocks_Type(),
+                                  stubAddr, stubName, TypePtr::BOTTOM,
+                                  state_start, subkeyH_start, data_start, len);
+  return true;
+}
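
// [Editor's note -- illustrative sketch, not part of this changeset.] GHASH,
// as defined in NIST SP 800-38D, folds each 16-byte block into the state with
// state = (state XOR block) * H in GF(2^128), reducing by R = 0xE1 << 120.
// The sketch below is the plain shift-and-reduce multiply from the spec; real
// stubs use carry-less multiply instructions (e.g. PCLMULQDQ). All names here
// are hypothetical.

    #include <stdint.h>

    struct u128 { uint64_t hi, lo; };  // big-endian halves of a 128-bit value

    static u128 ghash_step(u128 state, u128 block, u128 h) {
      u128 x = { state.hi ^ block.hi, state.lo ^ block.lo };
      u128 z = { 0, 0 };
      u128 v = h;
      for (int i = 0; i < 128; i++) {
        // Scan the bits of x from the most significant downward.
        uint64_t bit = (i < 64) ? (x.hi >> (63 - i)) & 1
                                : (x.lo >> (127 - i)) & 1;
        if (bit) { z.hi ^= v.hi; z.lo ^= v.lo; }
        // Multiply v by x (a right shift in GCM's reflected bit order),
        // folding in R when a bit falls off the low end.
        uint64_t carry = v.lo & 1;
        v.lo = (v.lo >> 1) | (v.hi << 63);
        v.hi >>= 1;
        if (carry) v.hi ^= 0xE100000000000000ULL;
      }
      return z;  // new state
    }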
+
 //------------------------------inline_sha_implCompress-----------------------
 //
 // Calculate SHA (i.e., SHA-1) for single-block byte[] array.
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -38,6 +38,7 @@
 #include "opto/rootnode.hpp"
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
+#include "opto/superword.hpp"
 #include "opto/vectornode.hpp"
 
 //------------------------------is_loop_exit-----------------------------------
@@ -640,7 +641,7 @@
 //------------------------------policy_unroll----------------------------------
 // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
 // the loop is a CountedLoop and the body is small enough.
-bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
+bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) {
 
   CountedLoopNode *cl = _head->as_CountedLoop();
   assert(cl->is_normal_loop() || cl->is_main_loop(), "");
@@ -652,6 +653,8 @@
   // After split at least one iteration will be executed in pre-loop.
   if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
 
+  _local_loop_unroll_limit = LoopUnrollLimit;
+  _local_loop_unroll_factor = 4;
   int future_unroll_ct = cl->unrolled_count() * 2;
   if (future_unroll_ct > LoopMaxUnroll) return false;
 
@@ -747,8 +750,24 @@
     } // switch
   }
 
+  if (UseSuperWord) {
+    if (!cl->is_reduction_loop()) {
+      phase->mark_reductions(this);
+    }
+
+    // Only attempt SLP analysis when user flags do not prohibit it
+    if (LoopMaxUnroll > _local_loop_unroll_factor) {
+      // Once policy_unroll_slp_analysis succeeds, the loop is marked with the
+      // maximal unroll factor so that repeated analysis passes are minimized
+      if ((future_unroll_ct > _local_loop_unroll_factor) ||
+          (body_size > (uint)_local_loop_unroll_limit)) {
+        policy_unroll_slp_analysis(cl, phase, future_unroll_ct);
+      }
+    }
+  }
+
   // Check for being too big
-  if (body_size > (uint)LoopUnrollLimit) {
+  if (body_size > (uint)_local_loop_unroll_limit) {
     if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
     // Normal case: loop too big
     return false;
@@ -758,6 +777,36 @@
   return true;
 }
 
+void IdealLoopTree::policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct) {
+  // This functionality is enabled on a per-target basis, as needed
+  if (SuperWordLoopUnrollAnalysis) {
+    if (!cl->has_passed_slp()) {
+      SuperWord sw(phase);
+      sw.transform_loop(this, false);
+
+      // If the loop is SLP-canonical, analyze it
+      if (sw.early_return() == false) {
+        sw.unrolling_analysis(cl, _local_loop_unroll_factor);
+      }
+    }
+
+    int slp_max_unroll_factor = cl->slp_max_unroll();
+    if ((slp_max_unroll_factor > 4) &&
+        (slp_max_unroll_factor >= future_unroll_ct)) {
+      int new_limit = cl->node_count_before_unroll() * slp_max_unroll_factor;
+      if (new_limit > LoopUnrollLimit) {
+#ifndef PRODUCT
+        if (TraceSuperWordLoopUnrollAnalysis) {
+          tty->print_cr("slp analysis is applying unroll limit  %d, the original limit was %d\n",
+            new_limit, _local_loop_unroll_limit);
+        }
+#endif
+        _local_loop_unroll_limit = new_limit;
+      }
+    }
+  }
+}
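
// [Editor's note -- worked example with assumed numbers.] Suppose a counted
// loop has node_count_before_unroll() == 20 and the SLP pass reports
// slp_max_unroll() == 8 with future_unroll_ct == 8. Both gating conditions
// above hold (8 > 4 and 8 >= 8), so new_limit = 20 * 8 = 160; if
// LoopUnrollLimit is 60, the local limit is raised to 160, letting
// policy_unroll keep doubling until the loop is wide enough for an 8-lane
// vector instead of stopping at the scalar budget.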
+
 //------------------------------policy_align-----------------------------------
 // Return TRUE or FALSE if the loop should be cache-line aligned.  Gather the
 // expression that does the alignment.  Note that only one array base can be
@@ -1611,6 +1660,7 @@
               // iff the uses conform
               if (ok) {
                 def_node->add_flag(Node::Flag_is_reduction);
+                loop_head->mark_has_reductions();
               }
             }
           }
@@ -2517,7 +2567,6 @@
     // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
     // peeling.
     if (should_unroll && !should_peel) {
-      phase->mark_reductions(this);
       phase->do_unroll(this, old_new, true);
     }
 
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -2408,7 +2408,7 @@
     for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
       IdealLoopTree* lpt = iter.current();
       if (lpt->is_counted()) {
-        sw.transform_loop(lpt);
+        sw.transform_loop(lpt, true);
       }
     }
   }
--- a/hotspot/src/share/vm/opto/loopnode.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/opto/loopnode.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -62,7 +62,9 @@
          HasExactTripCount=8,
          InnerLoop=16,
          PartialPeelLoop=32,
-         PartialPeelFailed=64 };
+         PartialPeelFailed=64,
+         HasReductions=128,
+         PassedSlpAnalysis=256 };
   char _unswitch_count;
   enum { _unswitch_max=3 };
 
@@ -77,6 +79,8 @@
   void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
   int partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
   void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
+  void mark_has_reductions() { _loop_flags |= HasReductions; }
+  void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
 
   int unswitch_max() { return _unswitch_max; }
   int unswitch_count() { return _unswitch_count; }
@@ -155,11 +159,15 @@
   // unroll,optimize,unroll,optimize,... is making progress
   int _node_count_before_unroll;
 
+  // If SLP analysis is performed, we record the maximum
+  // vector-mapped unroll factor here
+  int _slp_maximum_unroll_factor;
+
 public:
   CountedLoopNode( Node *entry, Node *backedge )
     : LoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint),
       _profile_trip_cnt(COUNT_UNKNOWN), _unrolled_count_log2(0),
-      _node_count_before_unroll(0) {
+      _node_count_before_unroll(0), _slp_maximum_unroll_factor(0) {
     init_class_id(Class_CountedLoop);
     // Initialize _trip_count to the largest possible value.
     // Will be reset (lower) if the loop's trip count is known.
@@ -199,10 +207,12 @@
 
   // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
   // Aligned, may be missing its pre-loop.
-  int is_normal_loop() const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
-  int is_pre_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Pre;    }
-  int is_main_loop  () const { return (_loop_flags&PreMainPostFlagsMask) == Main;   }
-  int is_post_loop  () const { return (_loop_flags&PreMainPostFlagsMask) == Post;   }
+  int is_normal_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
+  int is_pre_loop      () const { return (_loop_flags&PreMainPostFlagsMask) == Pre;    }
+  int is_main_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Main;   }
+  int is_post_loop     () const { return (_loop_flags&PreMainPostFlagsMask) == Post;   }
+  int is_reduction_loop() const { return (_loop_flags&HasReductions) == HasReductions; }
+  int has_passed_slp   () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; }
   int is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
   void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }
 
@@ -232,8 +242,10 @@
   void double_unrolled_count() { _unrolled_count_log2++; }
   int  unrolled_count()        { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }
 
-  void set_node_count_before_unroll(int ct) { _node_count_before_unroll = ct; }
-  int  node_count_before_unroll()           { return _node_count_before_unroll; }
+  void set_node_count_before_unroll(int ct)  { _node_count_before_unroll = ct; }
+  int  node_count_before_unroll()            { return _node_count_before_unroll; }
+  void set_slp_max_unroll(int unroll_factor) { _slp_maximum_unroll_factor = unroll_factor; }
+  int  slp_max_unroll() const                { return _slp_maximum_unroll_factor; }
 
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
@@ -336,6 +348,8 @@
   Node *_tail;                  // Tail of loop
   inline Node *tail();          // Handle lazy update of _tail field
   PhaseIdealLoop* _phase;
+  int _local_loop_unroll_limit;
+  int _local_loop_unroll_factor;
 
   Node_List _body;              // Loop body for inner loops
 
@@ -356,7 +370,8 @@
       _safepts(NULL),
       _required_safept(NULL),
       _allow_optimizations(true),
-      _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0)
+      _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0),
+      _local_loop_unroll_limit(0), _local_loop_unroll_factor(0)
   { }
 
   // Is 'l' a member of 'this'?
@@ -444,7 +459,10 @@
 
   // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
   // the loop is a CountedLoop and the body is small enough.
-  bool policy_unroll( PhaseIdealLoop *phase ) const;
+  bool policy_unroll(PhaseIdealLoop *phase);
+
+  // Loop analysis that maps the loop onto a maximal superword unroll factor for vectorization.
+  void policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct);
 
   // Return TRUE or FALSE if the loop should be range-check-eliminated.
   // Gather a list of IF tests that are dominated by iteration splitting;
--- a/hotspot/src/share/vm/opto/runtime.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -851,6 +851,29 @@
   return TypeFunc::make(domain, range);
 }
 
+/**
+ * int updateBytesCRC32C(int crc, byte* buf, int len, int* table)
+ */
+const TypeFunc* OptoRuntime::updateBytesCRC32C_Type() {
+  // create input type (domain)
+  int num_args      = 4;
+  int argcnt = num_args;
+  const Type** fields = TypeTuple::fields(argcnt);
+  int argp = TypeFunc::Parms;
+  fields[argp++] = TypeInt::INT;        // crc
+  fields[argp++] = TypePtr::NOTNULL;    // buf
+  fields[argp++] = TypeInt::INT;        // len
+  fields[argp++] = TypePtr::NOTNULL;    // table
+  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
+  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
+
+  // result type needed
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
+  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
+  return TypeFunc::make(domain, range);
+}
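
// [Editor's note.] The four domain fields above line up positionally with the
// arguments passed from LibraryCallKit::inline_updateBytesCRC32C() earlier in
// this changeset: make_runtime_call(..., crc, src_start, length, table_start),
// i.e. INT, non-null pointer, INT, non-null pointer.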
+
 // for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning int
 const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
   // create input type (domain)
@@ -987,7 +1010,25 @@
   return TypeFunc::make(domain, range);
 }
 
+// GHASH block processing
+const TypeFunc* OptoRuntime::ghash_processBlocks_Type() {
+  int argcnt = 4;
+
+  const Type** fields = TypeTuple::fields(argcnt);
+  int argp = TypeFunc::Parms;
+  fields[argp++] = TypePtr::NOTNULL;    // state
+  fields[argp++] = TypePtr::NOTNULL;    // subkeyH
+  fields[argp++] = TypePtr::NOTNULL;    // data
+  fields[argp++] = TypeInt::INT;        // blocks
+  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
+  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
+
+  // result type needed
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = NULL; // void
+  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
+  return TypeFunc::make(domain, range);
+}
 
 //------------- Interpreter state access for on stack replacement
 const TypeFunc* OptoRuntime::osr_end_Type() {
--- a/hotspot/src/share/vm/opto/runtime.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/opto/runtime.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -316,7 +316,10 @@
 
   static const TypeFunc* mulAdd_Type();
 
+  static const TypeFunc* ghash_processBlocks_Type();
+
   static const TypeFunc* updateBytesCRC32_Type();
+  static const TypeFunc* updateBytesCRC32C_Type();
 
   // leaf on stack replacement interpreter accessor types
   static const TypeFunc* osr_end_Type();
--- a/hotspot/src/share/vm/opto/superword.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/opto/superword.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -68,6 +68,7 @@
   _bb(NULL),                              // basic block
   _iv(NULL),                              // induction var
   _race_possible(false),                  // cases where SDMU is true
+  _early_return(true),                    // true until the analysis routine initializes
   _num_work_vecs(0),                      // amount of vector work we have
   _num_reductions(0),                     // amount of reduction work we have
   _do_vector_loop(phase->C->do_vector_loop()),  // whether to do vectorization/simd style
@@ -78,7 +79,7 @@
 {}
 
 //------------------------------transform_loop---------------------------
-void SuperWord::transform_loop(IdealLoopTree* lpt) {
+void SuperWord::transform_loop(IdealLoopTree* lpt, bool do_optimization) {
   assert(UseSuperWord, "should be");
   // Do vectors exist on this architecture?
   if (Matcher::vector_width_in_bytes(T_BYTE) < 2) return;
@@ -113,8 +114,164 @@
   // For now, define one block which is the entire loop body
   set_bb(cl);
 
-  assert(_packset.length() == 0, "packset must be empty");
-  SLP_extract();
+  if (do_optimization) {
+    assert(_packset.length() == 0, "packset must be empty");
+    SLP_extract();
+  }
+}
+
+//------------------------------early unrolling analysis------------------------------
+void SuperWord::unrolling_analysis(CountedLoopNode *cl, int &local_loop_unroll_factor) {
+  bool is_slp = true;
+  ResourceMark rm;
+  size_t ignored_size = lpt()->_body.size();
+  int *ignored_loop_nodes = NEW_RESOURCE_ARRAY(int, ignored_size);
+  Node_Stack nstack((int)ignored_size);
+  Node *cl_exit = cl->loopexit();
+
+  // First clear the entries
+  for (uint i = 0; i < lpt()->_body.size(); i++) {
+    ignored_loop_nodes[i] = -1;
+  }
+
+  int max_vector = Matcher::max_vector_size(T_INT);
+
+  // Process the loop: some or all of the stack entries may be out of order, so
+  // preprocess the ignored initial state before walking the loop body
+  for (uint i = 0; i < lpt()->_body.size(); i++) {
+    Node* n = lpt()->_body.at(i);
+    if (n == cl->incr() ||
+      n->is_reduction() ||
+      n->is_AddP() ||
+      n->is_Cmp() ||
+      n->is_IfTrue() ||
+      n->is_CountedLoop() ||
+      (n == cl_exit)) {
+      ignored_loop_nodes[i] = n->_idx;
+      continue;
+    }
+
+    if (n->is_If()) {
+      IfNode *iff = n->as_If();
+      if (iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN) {
+        if (lpt()->is_loop_exit(iff)) {
+          ignored_loop_nodes[i] = n->_idx;
+          continue;
+        }
+      }
+    }
+
+    if (n->is_Phi() && (n->bottom_type() == Type::MEMORY)) {
+      Node* n_tail = n->in(LoopNode::LoopBackControl);
+      if (n_tail != n->in(LoopNode::EntryControl)) {
+        if (!n_tail->is_Mem()) {
+          is_slp = false;
+          break;
+        }
+      }
+    }
+
+    // This must happen after the Phi/If checks above
+    if (n->is_Phi() || n->is_If()) {
+      ignored_loop_nodes[i] = n->_idx;
+      continue;
+    }
+
+    if (n->is_LoadStore() || n->is_MergeMem() ||
+      (n->is_Proj() && !n->as_Proj()->is_CFG())) {
+      is_slp = false;
+      break;
+    }
+
+    // Ignore nodes with non-primitive type.
+    BasicType bt;
+    if (n->is_Mem()) {
+      bt = n->as_Mem()->memory_type();
+    } else {
+      bt = n->bottom_type()->basic_type();
+    }
+    if (is_java_primitive(bt) == false) {
+      ignored_loop_nodes[i] = n->_idx;
+      continue;
+    }
+
+    if (n->is_Mem()) {
+      MemNode* current = n->as_Mem();
+      Node* adr = n->in(MemNode::Address);
+      Node* n_ctrl = _phase->get_ctrl(adr);
+
+      // Save a queue of nodes to post-process
+      if (n_ctrl != NULL && lpt()->is_member(_phase->get_loop(n_ctrl))) {
+        // Process the memory expression
+        int stack_idx = 0;
+        bool have_side_effects = true;
+        if (adr->is_AddP() == false) {
+          nstack.push(adr, stack_idx++);
+        } else {
+          // Mark the components of the memory operation in nstack
+          SWPointer p1(current, this, &nstack, true);
+          have_side_effects = p1.node_stack()->is_nonempty();
+        }
+
+        // Process the pointer stack
+        while (have_side_effects) {
+          Node* pointer_node = nstack.node();
+          for (uint j = 0; j < lpt()->_body.size(); j++) {
+            Node* cur_node = lpt()->_body.at(j);
+            if (cur_node == pointer_node) {
+              ignored_loop_nodes[j] = cur_node->_idx;
+              break;
+            }
+          }
+          nstack.pop();
+          have_side_effects = nstack.is_nonempty();
+        }
+      }
+    }
+  }
+
+  if (is_slp) {
+    // Now try to find the maximum consistent vector size that the machine
+    // description can use
+    for (uint i = 0; i < lpt()->_body.size(); i++) {
+      if (ignored_loop_nodes[i] != -1) continue;
+
+      BasicType bt;
+      Node* n = lpt()->_body.at(i);
+      if (n->is_Mem()) {
+        bt = n->as_Mem()->memory_type();
+      } else {
+        bt = n->bottom_type()->basic_type();
+      }
+      if (is_java_primitive(bt) == false) continue;
+
+      int cur_max_vector = Matcher::max_vector_size(bt);
+
+      // If the max vector for this type is no larger than local_loop_unroll_factor,
+      // stop looking; we already have the max vector to map to.
+      if (cur_max_vector <= local_loop_unroll_factor) {
+        is_slp = false;
+#ifndef PRODUCT
+        if (TraceSuperWordLoopUnrollAnalysis) {
+          tty->print_cr("slp analysis fails: unroll limit equals max vector\n");
+        }
+#endif
+        break;
+      }
+
+      // Map the maximal common vector
+      if (VectorNode::implemented(n->Opcode(), cur_max_vector, bt)) {
+        if (cur_max_vector < max_vector) {
+          max_vector = cur_max_vector;
+        }
+      }
+    }
+    if (is_slp) {
+      local_loop_unroll_factor = max_vector;
+    }
+    cl->mark_passed_slp();
+    cl->set_slp_max_unroll(local_loop_unroll_factor);
+  }
 }
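
// [Editor's note -- worked example with an assumed 32-byte vector width.] On
// such a target Matcher::max_vector_size(T_INT) == 8 and
// max_vector_size(T_SHORT) == 16. Both exceed the incoming
// local_loop_unroll_factor of 4, so the early bail-out above is not taken;
// the smallest implemented size, 8, survives as max_vector, and the loop is
// marked via mark_passed_slp() and set_slp_max_unroll(8).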
 
 //------------------------------SLP_extract---------------------------
@@ -268,12 +425,12 @@
       best_iv_adjustment = iv_adjustment;
     }
 
-    SWPointer align_to_ref_p(mem_ref, this);
+    SWPointer align_to_ref_p(mem_ref, this, NULL, false);
     // Set alignment relative to "align_to_ref" for all related memory operations.
     for (int i = memops.size() - 1; i >= 0; i--) {
       MemNode* s = memops.at(i)->as_Mem();
       if (isomorphic(s, mem_ref)) {
-        SWPointer p2(s, this);
+        SWPointer p2(s, this, NULL, false);
         if (p2.comparable(align_to_ref_p)) {
           int align = memory_alignment(s, iv_adjustment);
           set_alignment(s, align);
@@ -294,7 +451,7 @@
           // iterations in pre-loop will be not enough to align it.
           create_pack = false;
         } else {
-          SWPointer p2(best_align_to_mem_ref, this);
+          SWPointer p2(best_align_to_mem_ref, this, NULL, false);
           if (align_to_ref_p.invar() != p2.invar()) {
             // Do not vectorize memory accesses with different invariants
             // if unaligned memory accesses are not allowed.
@@ -411,7 +568,7 @@
   // Count number of comparable memory ops
   for (uint i = 0; i < memops.size(); i++) {
     MemNode* s1 = memops.at(i)->as_Mem();
-    SWPointer p1(s1, this);
+    SWPointer p1(s1, this, NULL, false);
     // Discard if pre loop can't align this reference
     if (!ref_is_alignable(p1)) {
       *cmp_ct.adr_at(i) = 0;
@@ -420,7 +577,7 @@
     for (uint j = i+1; j < memops.size(); j++) {
       MemNode* s2 = memops.at(j)->as_Mem();
       if (isomorphic(s1, s2)) {
-        SWPointer p2(s2, this);
+        SWPointer p2(s2, this, NULL, false);
         if (p1.comparable(p2)) {
           (*cmp_ct.adr_at(i))++;
           (*cmp_ct.adr_at(j))++;
@@ -441,7 +598,7 @@
     if (s->is_Store()) {
       int vw = vector_width_in_bytes(s);
       assert(vw > 1, "sanity");
-      SWPointer p(s, this);
+      SWPointer p(s, this, NULL, false);
       if (cmp_ct.at(j) >  max_ct ||
           cmp_ct.at(j) == max_ct &&
             (vw >  max_vw ||
@@ -464,7 +621,7 @@
       if (s->is_Load()) {
         int vw = vector_width_in_bytes(s);
         assert(vw > 1, "sanity");
-        SWPointer p(s, this);
+        SWPointer p(s, this, NULL, false);
         if (cmp_ct.at(j) >  max_ct ||
             cmp_ct.at(j) == max_ct &&
               (vw >  max_vw ||
@@ -575,7 +732,7 @@
 //---------------------------get_iv_adjustment---------------------------
 // Calculate loop's iv adjustment for this memory ops.
 int SuperWord::get_iv_adjustment(MemNode* mem_ref) {
-  SWPointer align_to_ref_p(mem_ref, this);
+  SWPointer align_to_ref_p(mem_ref, this, NULL, false);
   int offset = align_to_ref_p.offset_in_bytes();
   int scale  = align_to_ref_p.scale_in_bytes();
   int elt_size = align_to_ref_p.memory_size();
@@ -649,13 +806,13 @@
       if (_dg.dep(s1)->in_cnt() == 0) {
         _dg.make_edge(slice, s1);
       }
-      SWPointer p1(s1->as_Mem(), this);
+      SWPointer p1(s1->as_Mem(), this, NULL, false);
       bool sink_dependent = true;
       for (int k = j - 1; k >= 0; k--) {
         Node* s2 = _nlist.at(k);
         if (s1->is_Load() && s2->is_Load())
           continue;
-        SWPointer p2(s2->as_Mem(), this);
+        SWPointer p2(s2->as_Mem(), this, NULL, false);
 
         int cmp = p1.cmp(p2);
         if (SuperWordRTDepCheck &&
@@ -795,8 +952,8 @@
   if (_phase->C->get_alias_index(s1->as_Mem()->adr_type()) !=
       _phase->C->get_alias_index(s2->as_Mem()->adr_type()))
     return false;
-  SWPointer p1(s1->as_Mem(), this);
-  SWPointer p2(s2->as_Mem(), this);
+  SWPointer p1(s1->as_Mem(), this, NULL, false);
+  SWPointer p2(s2->as_Mem(), this, NULL, false);
   if (p1.base() != p2.base() || !p1.comparable(p2)) return false;
   int diff = p2.offset_in_bytes() - p1.offset_in_bytes();
   return diff == data_size(s1);
@@ -1615,13 +1772,13 @@
       if (n->is_Load()) {
         Node* ctl = n->in(MemNode::Control);
         Node* mem = first->in(MemNode::Memory);
-        SWPointer p1(n->as_Mem(), this);
+        SWPointer p1(n->as_Mem(), this, NULL, false);
         // Identify the memory dependency for the new loadVector node by
         // walking up through memory chain.
         // This is done to give flexibility to the new loadVector node so that
         // it can move above independent storeVector nodes.
         while (mem->is_StoreVector()) {
-          SWPointer p2(mem->as_Mem(), this);
+          SWPointer p2(mem->as_Mem(), this, NULL, false);
           int cmp = p1.cmp(p2);
           if (SWPointer::not_equal(cmp) || !SWPointer::comparable(cmp)) {
             mem = mem->in(MemNode::Memory);
@@ -2138,7 +2295,7 @@
 //------------------------------memory_alignment---------------------------
 // Alignment within a vector memory reference
 int SuperWord::memory_alignment(MemNode* s, int iv_adjust) {
-  SWPointer p(s, this);
+  SWPointer p(s, this, NULL, false);
   if (!p.valid()) {
     return bottom_align;
   }
@@ -2315,7 +2472,7 @@
   Node *orig_limit = pre_opaq->original_loop_limit();
   assert(orig_limit != NULL && _igvn.type(orig_limit) != Type::TOP, "");
 
-  SWPointer align_to_ref_p(align_to_ref, this);
+  SWPointer align_to_ref_p(align_to_ref, this, NULL, false);
   assert(align_to_ref_p.valid(), "sanity");
 
   // Given:
@@ -2489,6 +2646,7 @@
   _bb = NULL;
   _iv = NULL;
   _race_possible = 0;
+  _early_return = false;
   _num_work_vecs = 0;
   _num_reductions = 0;
 }
@@ -2559,9 +2717,11 @@
 //==============================SWPointer===========================
 
 //----------------------------SWPointer------------------------
-SWPointer::SWPointer(MemNode* mem, SuperWord* slp) :
+SWPointer::SWPointer(MemNode* mem, SuperWord* slp, Node_Stack *nstack, bool analyze_only) :
   _mem(mem), _slp(slp),  _base(NULL),  _adr(NULL),
-  _scale(0), _offset(0), _invar(NULL), _negate_invar(false) {
+  _scale(0), _offset(0), _invar(NULL), _negate_invar(false),
+  _nstack(nstack), _analyze_only(analyze_only),
+  _stack_idx(0) {
 
   Node* adr = mem->in(MemNode::Address);
   if (!adr->is_AddP()) {
@@ -2599,7 +2759,9 @@
 // the pattern match of an address expression.
 SWPointer::SWPointer(SWPointer* p) :
   _mem(p->_mem), _slp(p->_slp),  _base(NULL),  _adr(NULL),
-  _scale(0), _offset(0), _invar(NULL), _negate_invar(false) {}
+  _scale(0), _offset(0), _invar(NULL), _negate_invar(false),
+  _nstack(p->_nstack), _analyze_only(p->_analyze_only),
+  _stack_idx(p->_stack_idx) {}
 
 //------------------------scaled_iv_plus_offset--------------------
 // Match: k*iv + offset
@@ -2642,6 +2804,9 @@
     _scale = 1;
     return true;
   }
+  if (_analyze_only && (invariant(n) == false)) {
+    _nstack->push(n, _stack_idx++);
+  }
   int opc = n->Opcode();
   if (opc == Op_MulI) {
     if (n->in(1) == iv() && n->in(2)->is_Con()) {
@@ -2699,6 +2864,9 @@
     return false;
   }
   if (_invar != NULL) return false; // already have an invariant
+  if (_analyze_only && (invariant(n) == false)) {
+    _nstack->push(n, _stack_idx++);
+  }
   if (opc == Op_AddI) {
     if (n->in(2)->is_Con() && invariant(n->in(1))) {
       _negate_invar = negate;
--- a/hotspot/src/share/vm/opto/superword.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/opto/superword.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -239,12 +239,15 @@
  public:
   SuperWord(PhaseIdealLoop* phase);
 
-  void transform_loop(IdealLoopTree* lpt);
+  void transform_loop(IdealLoopTree* lpt, bool do_optimization);
+
+  void unrolling_analysis(CountedLoopNode *cl, int &local_loop_unroll_factor);
 
   // Accessors for SWPointer
   PhaseIdealLoop* phase()          { return _phase; }
   IdealLoopTree* lpt()             { return _lpt; }
   PhiNode* iv()                    { return _iv; }
+  bool early_return()              { return _early_return; }
 
  private:
   IdealLoopTree* _lpt;             // Current loop tree node
@@ -252,6 +255,7 @@
   Node*          _bb;              // Current basic block
   PhiNode*       _iv;              // Induction var
   bool           _race_possible;   // In cases where SDMU is true
+  bool           _early_return;    // True if the analysis bailed out before initializing
   bool           _do_vector_loop;  // whether to do vectorization/simd style
   bool           _vector_loop_debug; // provide more printing in debug mode
   int            _num_work_vecs;   // Number of non memory vector operations
@@ -462,15 +466,18 @@
 // Information about an address for dependence checking and vector alignment
 class SWPointer VALUE_OBJ_CLASS_SPEC {
  protected:
-  MemNode*   _mem;     // My memory reference node
-  SuperWord* _slp;     // SuperWord class
+  MemNode*   _mem;           // My memory reference node
+  SuperWord* _slp;           // SuperWord class
 
-  Node* _base;         // NULL if unsafe nonheap reference
-  Node* _adr;          // address pointer
-  jint  _scale;        // multiplier for iv (in bytes), 0 if no loop iv
-  jint  _offset;       // constant offset (in bytes)
-  Node* _invar;        // invariant offset (in bytes), NULL if none
-  bool  _negate_invar; // if true then use: (0 - _invar)
+  Node* _base;               // NULL if unsafe nonheap reference
+  Node* _adr;                // address pointer
+  jint  _scale;              // multiplier for iv (in bytes), 0 if no loop iv
+  jint  _offset;             // constant offset (in bytes)
+  Node* _invar;              // invariant offset (in bytes), NULL if none
+  bool  _negate_invar;       // if true then use: (0 - _invar)
+  Node_Stack* _nstack;       // stack used to record an SWPointer trace of loop-variant nodes
+  bool        _analyze_only; // used only during loop-unrolling analysis, for the SWPointer trace
+  uint        _stack_idx;    // used only during loop-unrolling analysis, for the SWPointer trace
 
   PhaseIdealLoop* phase() { return _slp->phase(); }
   IdealLoopTree*  lpt()   { return _slp->lpt(); }
@@ -497,7 +504,7 @@
     NotComparable = (Less | Greater | Equal)
   };
 
-  SWPointer(MemNode* mem, SuperWord* slp);
+  SWPointer(MemNode* mem, SuperWord* slp, Node_Stack *nstack, bool analyze_only);
   // Following is used to create a temporary object during
   // the pattern match of an address expression.
   SWPointer(SWPointer* p);
@@ -505,14 +512,15 @@
   bool valid()  { return _adr != NULL; }
   bool has_iv() { return _scale != 0; }
 
-  Node* base()            { return _base; }
-  Node* adr()             { return _adr; }
-  MemNode* mem()          { return _mem; }
-  int   scale_in_bytes()  { return _scale; }
-  Node* invar()           { return _invar; }
-  bool  negate_invar()    { return _negate_invar; }
-  int   offset_in_bytes() { return _offset; }
-  int   memory_size()     { return _mem->memory_size(); }
+  Node* base()             { return _base; }
+  Node* adr()              { return _adr; }
+  MemNode* mem()           { return _mem; }
+  int   scale_in_bytes()   { return _scale; }
+  Node* invar()            { return _invar; }
+  bool  negate_invar()     { return _negate_invar; }
+  int   offset_in_bytes()  { return _offset; }
+  int   memory_size()      { return _mem->memory_size(); }
+  Node_Stack* node_stack() { return _nstack; }
 
   // Comparable?
   int cmp(SWPointer& q) {
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -3771,8 +3771,12 @@
   if (TieredCompilation) {
     set_tiered_flags();
   } else {
-    // Check if the policy is valid. Policies 0 and 1 are valid for non-tiered setup.
-    if (CompilationPolicyChoice >= 2) {
+    int max_compilation_policy_choice = 1;
+#ifdef COMPILER2
+    max_compilation_policy_choice = 2;
+#endif
+    // Check if the policy is valid.
+    if (CompilationPolicyChoice >= max_compilation_policy_choice) {
       vm_exit_during_initialization(
         "Incompatible compilation policy selected", NULL);
     }
--- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -512,7 +512,7 @@
     RegisterMap reg_map(thread, false);
     javaVFrame* triggerVF = thread->last_java_vframe(&reg_map);
     // triggerVF is the frame that triggered its counter
-    RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m);
+    RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m());
 
     if (first->top_method()->code() != NULL) {
       // called obsolete method/nmethod -- no need to recompile
@@ -557,8 +557,8 @@
     if( !next )               // No next frame up the stack?
       break;                  // Then compile with current frame
 
-    methodHandle m = current->top_method();
-    methodHandle next_m = next->top_method();
+    Method* m = current->top_method();
+    Method* next_m = next->top_method();
 
     if (TraceCompilationPolicy && Verbose) {
       tty->print("[caller: ");
@@ -644,7 +644,7 @@
     if (TraceCompilationPolicy && Verbose) {
       tty->print("\n\t     check caller: ");
       next_m->print_short_name(tty);
-      tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", p2i((address)next_m()), next_m->code_size());
+      tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", p2i((address)next_m), next_m->code_size());
     }
 
     current = next;
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -730,6 +730,9 @@
   product(bool, UseSHA, false,                                              \
           "Control whether SHA instructions can be used on SPARC")          \
                                                                             \
+  product(bool, UseGHASHIntrinsics, false,                                  \
+          "Use intrinsics for GHASH versions of crypto")                    \
+                                                                            \
   product(size_t, LargePageSizeInBytes, 0,                                  \
           "Large page size (0 to let VM choose the page size)")             \
                                                                             \
@@ -845,6 +848,9 @@
   product(bool, UseCRC32Intrinsics, false,                                  \
           "use intrinsics for java.util.zip.CRC32")                         \
                                                                             \
+  product(bool, UseCRC32CIntrinsics, false,                                 \
+          "use intrinsics for java.util.zip.CRC32C")                        \
+                                                                            \
   develop(bool, TraceCallFixup, false,                                      \
           "Trace all call fixups")                                          \
                                                                             \
--- a/hotspot/src/share/vm/runtime/rframe.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/runtime/rframe.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -52,12 +52,12 @@
 : RFrame(fr, thread, callee) {
   RegisterMap map(thread, false);
   _vf     = javaVFrame::cast(vframe::new_vframe(&_fr, &map, thread));
-  _method = methodHandle(thread, _vf->method());
+  _method = _vf->method();
   assert(   _vf->is_interpreted_frame(), "must be interpreted");
   init();
 }
 
-InterpretedRFrame::InterpretedRFrame(frame fr, JavaThread* thread, methodHandle m)
+InterpretedRFrame::InterpretedRFrame(frame fr, JavaThread* thread, Method* m)
 : RFrame(fr, thread, NULL) {
   RegisterMap map(thread, false);
   _vf     = javaVFrame::cast(vframe::new_vframe(&_fr, &map, thread));
@@ -140,8 +140,8 @@
   _nm = compiledVFrame::cast(vf)->code();
   vf = vf->top();
   _vf = javaVFrame::cast(vf);
-  _method = methodHandle(thread(), CodeCache::find_nmethod(_fr.pc())->method());
-  assert(_method(), "should have found a method");
+  _method = CodeCache::find_nmethod(_fr.pc())->method();
+  assert(_method, "should have found a method");
 #ifndef PRODUCT
   _invocations = _method->compiled_invocation_count();
 #endif
--- a/hotspot/src/share/vm/runtime/rframe.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/runtime/rframe.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -60,7 +60,7 @@
   frame fr() const                        { return _fr; }
   JavaThread* thread() const              { return _thread; }
   virtual int cost() const = 0;           // estimated inlining cost (size)
-  virtual methodHandle top_method() const  = 0;
+  virtual Method* top_method() const  = 0;
   virtual javaVFrame* top_vframe() const = 0;
   virtual nmethod* nm() const             { ShouldNotCallThis(); return NULL; }
 
@@ -79,7 +79,7 @@
  protected:
   nmethod*    _nm;
   javaVFrame* _vf;                        // top vframe; may be NULL (for most recent frame)
-  methodHandle _method;                   // top method
+  Method* _method;                        // top method
 
   CompiledRFrame(frame fr, JavaThread* thread, RFrame*const  callee);
   void init();
@@ -88,7 +88,7 @@
  public:
   CompiledRFrame(frame fr, JavaThread* thread); // for nmethod triggering its counter (callee == NULL)
   bool is_compiled() const                 { return true; }
-  methodHandle top_method() const          { return _method; }
+  Method* top_method() const               { return _method; }
   javaVFrame* top_vframe() const           { return _vf; }
   nmethod* nm() const                      { return _nm; }
   int cost() const;
@@ -98,16 +98,16 @@
 class InterpretedRFrame : public RFrame {    // interpreter frame
  protected:
   javaVFrame* _vf;                           // may be NULL (for most recent frame)
-  methodHandle   _method;
+  Method* _method;
 
   InterpretedRFrame(frame fr, JavaThread* thread, RFrame*const  callee);
   void init();
   friend class RFrame;
 
  public:
-  InterpretedRFrame(frame fr, JavaThread* thread, methodHandle m); // constructor for method triggering its invocation counter
+  InterpretedRFrame(frame fr, JavaThread* thread, Method* m); // constructor for method triggering its invocation counter
   bool is_interpreted() const                { return true; }
-  methodHandle top_method() const            { return _method; }
+  Method* top_method() const                 { return _method; }
   javaVFrame* top_vframe() const             { return _vf; }
   int cost() const;
   void print();
--- a/hotspot/src/share/vm/runtime/stubRoutines.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -126,6 +126,7 @@
 address StubRoutines::_aescrypt_decryptBlock               = NULL;
 address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL;
 address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL;
+address StubRoutines::_ghash_processBlocks                 = NULL;
 
 address StubRoutines::_sha1_implCompress     = NULL;
 address StubRoutines::_sha1_implCompressMB   = NULL;
@@ -137,6 +138,8 @@
 address StubRoutines::_updateBytesCRC32 = NULL;
 address StubRoutines::_crc_table_adr = NULL;
 
+address StubRoutines::_updateBytesCRC32C = NULL;
+
 address StubRoutines::_multiplyToLen = NULL;
 address StubRoutines::_squareToLen = NULL;
 address StubRoutines::_mulAdd = NULL;
@@ -174,6 +177,9 @@
     }
     CodeBuffer buffer(_code1);
     StubGenerator_generate(&buffer, false);
+    // When new stubs are added we need to make sure there is some space left
+    // to catch the situation when we should increase the size again.
+    assert(buffer.insts_remaining() > 200, "increase code_size1");
   }
 }
 
@@ -264,6 +270,9 @@
     }
     CodeBuffer buffer(_code2);
     StubGenerator_generate(&buffer, true);
+    // When new stubs are added we need to make sure there is some space left
+    // to catch the situation when we should increase the size again.
+    assert(buffer.insts_remaining() > 200, "increase code_size2");
   }
 
 #ifdef ASSERT
--- a/hotspot/src/share/vm/runtime/stubRoutines.hpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/runtime/stubRoutines.hpp	Thu Jul 02 14:20:36 2015 -0700
@@ -185,6 +185,7 @@
   static address _aescrypt_decryptBlock;
   static address _cipherBlockChaining_encryptAESCrypt;
   static address _cipherBlockChaining_decryptAESCrypt;
+  static address _ghash_processBlocks;
 
   static address _sha1_implCompress;
   static address _sha1_implCompressMB;
@@ -196,6 +197,8 @@
   static address _updateBytesCRC32;
   static address _crc_table_adr;
 
+  static address _updateBytesCRC32C;
+
   static address _multiplyToLen;
   static address _squareToLen;
   static address _mulAdd;
@@ -346,6 +349,7 @@
   static address aescrypt_decryptBlock()                { return _aescrypt_decryptBlock; }
   static address cipherBlockChaining_encryptAESCrypt()  { return _cipherBlockChaining_encryptAESCrypt; }
   static address cipherBlockChaining_decryptAESCrypt()  { return _cipherBlockChaining_decryptAESCrypt; }
+  static address ghash_processBlocks() { return _ghash_processBlocks; }
 
   static address sha1_implCompress()     { return _sha1_implCompress; }
   static address sha1_implCompressMB()   { return _sha1_implCompressMB; }
@@ -357,6 +361,8 @@
   static address updateBytesCRC32()    { return _updateBytesCRC32; }
   static address crc_table_addr()      { return _crc_table_adr; }
 
+  static address updateBytesCRC32C()   { return _updateBytesCRC32C; }
+
   static address multiplyToLen()       {return _multiplyToLen; }
   static address squareToLen()         {return _squareToLen; }
   static address mulAdd()              {return _mulAdd; }
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Jul 02 14:20:36 2015 -0700
@@ -829,8 +829,10 @@
      static_field(StubRoutines,                _aescrypt_decryptBlock,                        address)                               \
      static_field(StubRoutines,                _cipherBlockChaining_encryptAESCrypt,          address)                               \
      static_field(StubRoutines,                _cipherBlockChaining_decryptAESCrypt,          address)                               \
+     static_field(StubRoutines,                _ghash_processBlocks,                          address)                               \
      static_field(StubRoutines,                _updateBytesCRC32,                             address)                               \
      static_field(StubRoutines,                _crc_table_adr,                                address)                               \
+     static_field(StubRoutines,                _updateBytesCRC32C,                            address)                               \
      static_field(StubRoutines,                _multiplyToLen,                                address)                               \
      static_field(StubRoutines,                _squareToLen,                                  address)                               \
      static_field(StubRoutines,                _mulAdd,                                       address)                               \
--- a/hotspot/test/TEST.groups	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/TEST.groups	Thu Jul 02 14:20:36 2015 -0700
@@ -147,12 +147,16 @@
   gc/survivorAlignment \
   gc/TestGCLogRotationViaJcmd.java \
   runtime/InternalApi/ThreadCpuTimesDeadlock.java \
+  runtime/NMT/JcmdSummaryDiff.java \
+  runtime/RedefineTests/RedefineAnnotations.java \
   serviceability/sa/jmap-hashcode/Test8028623.java \
   serviceability/threads/TestFalseDeadLock.java \
   compiler/codecache/jmx \
   compiler/jsr292/RedefineMethodUsedByMultipleMethodHandles.java \
   compiler/rangechecks/TestRangeCheckSmearing.java \
-  serviceability/dcmd
+  compiler/whitebox/DeoptimizeMultipleOSRTest.java \
+  serviceability/dcmd \
+  testlibrary_tests/whitebox/vm_flags
 
 # Compact 2 adds full VM tests
 compact2 = \
--- a/hotspot/test/compiler/codecache/jmx/CodeCacheUtils.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codecache/jmx/CodeCacheUtils.java	Thu Jul 02 14:20:36 2015 -0700
@@ -21,6 +21,7 @@
  * questions.
  */
 
+import jdk.test.lib.Asserts;
 import jdk.test.lib.Utils;
 import java.lang.management.MemoryPoolMXBean;
 import javax.management.Notification;
@@ -80,19 +81,42 @@
     }
 
     /**
-     * A "non-nmethods" code heap is used by interpreter during bytecode
-     * execution, thus, it can't be predicted if this code heap usage will be
-     * increased or not. Same goes for 'All'.
+     * Checks if the usage of the code heap corresponding to 'btype' can be
+     * predicted at runtime if we disable compilation. The usage of the
+     * 'NonNMethod' code heap cannot be predicted because we generate adapters
+     * and buffers at runtime. The 'MethodNonProfiled' code heap is also not
+     * predictable because we may generate compiled versions of method handle
+     * intrinsics while resolving methods at runtime. Same applies to 'All'.
      *
      * @param btype BlobType to be checked
      * @return boolean value, true if respective code heap is predictable
      */
     public static boolean isCodeHeapPredictable(BlobType btype) {
-        return btype == BlobType.MethodNonProfiled
-                || btype == BlobType.MethodProfiled;
+        return btype == BlobType.MethodProfiled;
     }
 
-    public static void disableCollectionUsageThresholds(){
+    /**
+     * Verifies that 'newValue' is equal to 'oldValue' if usage of the
+     * corresponding code heap is predictable. Checks the weaker condition
+     * 'newValue >= oldValue' if usage is not predictable because intermediate
+     * allocations may happen.
+     *
+     * @param btype BlobType of the code heap to be checked
+     * @param newValue New value to be verified
+     * @param oldValue Old value to be verified
+     * @param msg Error message if verification fails
+     */
+    public static void assertEQorGTE(BlobType btype, long newValue, long oldValue, String msg) {
+        if (CodeCacheUtils.isCodeHeapPredictable(btype)) {
+            // Usage is predictable, check strong == condition
+            Asserts.assertEQ(newValue, oldValue, msg);
+        } else {
+            // Usage is not predictable, check weaker >= condition
+            Asserts.assertGTE(newValue, oldValue, msg);
+        }
+    }
+
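// [Editor's note.] Call sites elsewhere in this changeset replace the strict
//     Asserts.assertEQ(newValue, oldValue, msg);
// with
//     CodeCacheUtils.assertEQorGTE(btype, newValue, oldValue, msg);
// keeping the exact-equality guarantee for the predictable 'MethodProfiled'
// heap while tolerating background allocations in the others.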
+    public static void disableCollectionUsageThresholds() {
         BlobType.getAvailable().stream()
                 .map(BlobType::getMemoryPool)
                 .filter(MemoryPoolMXBean::isCollectionUsageThresholdSupported)
--- a/hotspot/test/compiler/codecache/jmx/GetUsageTest.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codecache/jmx/GetUsageTest.java	Thu Jul 02 14:20:36 2015 -0700
@@ -52,10 +52,8 @@
 
     public static void main(String[] args) throws Exception {
         for (BlobType btype : BlobType.getAvailable()) {
-            if (CodeCacheUtils.isCodeHeapPredictable(btype)) {
-                for (int allocSize = 10; allocSize < 100000; allocSize *= 10) {
-                    new GetUsageTest(btype, allocSize).runTest();
-                }
+            for (int allocSize = 10; allocSize < 100000; allocSize *= 10) {
+                new GetUsageTest(btype, allocSize).runTest();
             }
         }
     }
@@ -87,13 +85,15 @@
             for (MemoryPoolMXBean entry : predictableBeans) {
                 long diff = current.get(entry) - initial.get(entry);
                 if (entry.equals(btype.getMemoryPool())) {
-                    Asserts.assertFalse(diff <= 0L || diff > usageUpperEstimate,
-                            String.format("Pool %s usage increase was reported "
-                                    + "unexpectedly as increased by %d using "
-                                    + "allocation size %d", entry.getName(),
-                                    diff, allocateSize));
+                    if (CodeCacheUtils.isCodeHeapPredictable(btype)) {
+                        Asserts.assertFalse(diff <= 0L || diff > usageUpperEstimate,
+                                String.format("Pool %s usage increase was reported "
+                                        + "unexpectedly as increased by %d using "
+                                        + "allocation size %d", entry.getName(),
+                                        diff, allocateSize));
+                    }
                 } else {
-                    Asserts.assertEQ(diff, 0L,
+                    CodeCacheUtils.assertEQorGTE(btype, diff, 0L,
                             String.format("Pool %s usage changed unexpectedly while"
                                     + " trying to increase: %s using allocation "
                                     + "size %d", entry.getName(),
--- a/hotspot/test/compiler/codecache/jmx/PeakUsageTest.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codecache/jmx/PeakUsageTest.java	Thu Jul 02 14:20:36 2015 -0700
@@ -52,9 +52,7 @@
 
     public static void main(String[] args) {
         for (BlobType btype : BlobType.getAvailable()) {
-            if (CodeCacheUtils.isCodeHeapPredictable(btype)) {
-                new PeakUsageTest(btype).runTest();
-            }
+            new PeakUsageTest(btype).runTest();
         }
     }
 
@@ -65,7 +63,7 @@
                 CodeCacheUtils.ALLOCATION_SIZE, btype.id);
         long newPeakUsage = bean.getPeakUsage().getUsed();
         try {
-            Asserts.assertEQ(newPeakUsage, bean.getUsage().getUsed(),
+            CodeCacheUtils.assertEQorGTE(btype, newPeakUsage, bean.getUsage().getUsed(),
                     "Peak usage does not match usage after allocation for "
                     + bean.getName());
         } finally {
@@ -73,18 +71,18 @@
                 CodeCacheUtils.WB.freeCodeBlob(addr);
             }
         }
-        Asserts.assertEQ(newPeakUsage, bean.getPeakUsage().getUsed(),
+        CodeCacheUtils.assertEQorGTE(btype, newPeakUsage, bean.getPeakUsage().getUsed(),
                 "Code cache peak usage has changed after usage decreased for "
                 + bean.getName());
         bean.resetPeakUsage();
-        Asserts.assertEQ(bean.getPeakUsage().getUsed(),
+        CodeCacheUtils.assertEQorGTE(btype, bean.getPeakUsage().getUsed(),
                 bean.getUsage().getUsed(),
                 "Code cache peak usage is not equal to usage after reset for "
                 + bean.getName());
         long addr2 = CodeCacheUtils.WB.allocateCodeBlob(
                 CodeCacheUtils.ALLOCATION_SIZE, btype.id);
         try {
-            Asserts.assertEQ(bean.getPeakUsage().getUsed(),
+            CodeCacheUtils.assertEQorGTE(btype, bean.getPeakUsage().getUsed(),
                     bean.getUsage().getUsed(),
                     "Code cache peak usage is not equal to usage after fresh "
                     + "allocation for " + bean.getName());
--- a/hotspot/test/compiler/codecache/jmx/PoolsIndependenceTest.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codecache/jmx/PoolsIndependenceTest.java	Thu Jul 02 14:20:36 2015 -0700
@@ -97,13 +97,11 @@
             return false;
         });
         for (BlobType bt : BlobType.getAvailable()) {
-            if (CodeCacheUtils.isCodeHeapPredictable(bt)) {
-                int expectedNotificationsAmount = bt.equals(btype) ? 1 : 0;
-                Asserts.assertEQ(counters.get(bt.getMemoryPool().getName()).get(),
-                        expectedNotificationsAmount, String.format("Unexpected "
-                                + "amount of notifications for pool: %s",
-                                bt.getMemoryPool().getName()));
-            }
+            int expectedNotificationsAmount = bt.equals(btype) ? 1 : 0;
+            CodeCacheUtils.assertEQorGTE(btype, counters.get(bt.getMemoryPool().getName()).get(),
+                    expectedNotificationsAmount, String.format("Unexpected "
+                            + "amount of notifications for pool: %s",
+                            bt.getMemoryPool().getName()));
         }
         try {
             ((NotificationEmitter) ManagementFactory.getMemoryMXBean()).
--- a/hotspot/test/compiler/codecache/jmx/ThresholdNotificationsTest.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codecache/jmx/ThresholdNotificationsTest.java	Thu Jul 02 14:20:36 2015 -0700
@@ -54,9 +54,7 @@
 
     public static void main(String[] args) {
         for (BlobType bt : BlobType.getAvailable()) {
-            if (CodeCacheUtils.isCodeHeapPredictable(bt)) {
-                new ThresholdNotificationsTest(bt).runTest();
-            }
+            new ThresholdNotificationsTest(bt).runTest();
         }
     }
 
@@ -92,7 +90,9 @@
         }
         Asserts.assertTrue(
                 Utils.waitForCondition(
-                        () -> counter == iterationsCount, WAIT_TIME),
+                        () -> (CodeCacheUtils.isCodeHeapPredictable(btype) ?
+                                (counter == iterationsCount) : (counter >= iterationsCount)),
+                        WAIT_TIME),
                 "Couldn't receive expected notifications count");
         try {
             ((NotificationEmitter) ManagementFactory.getMemoryMXBean()).
--- a/hotspot/test/compiler/codecache/jmx/UsageThresholdExceededTest.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codecache/jmx/UsageThresholdExceededTest.java	Thu Jul 02 14:20:36 2015 -0700
@@ -51,13 +51,9 @@
     }
 
     public static void main(String[] args) {
-        int iterationsCount =
-            Integer.getInteger("jdk.test.lib.iterations", 1);
+        int iterationsCount = Integer.getInteger("jdk.test.lib.iterations", 1);
         for (BlobType btype : BlobType.getAvailable()) {
-            if (CodeCacheUtils.isCodeHeapPredictable(btype)) {
-                new UsageThresholdExceededTest(btype, iterationsCount)
-                        .runTest();
-            }
+            new UsageThresholdExceededTest(btype, iterationsCount).runTest();
         }
     }
 
@@ -67,9 +63,8 @@
         for (int i = 0; i < iterations; i++) {
             CodeCacheUtils.hitUsageThreshold(bean, btype);
         }
-        Asserts.assertEQ(bean.getUsageThresholdCount(), oldValue + iterations,
+        CodeCacheUtils.assertEQorGTE(btype, bean.getUsageThresholdCount(), oldValue + iterations,
                 "Unexpected threshold usage count");
-        System.out.printf("INFO: Scenario finished successfully for %s%n",
-                bean.getName());
+        System.out.printf("INFO: Scenario finished successfully for %s%n", bean.getName());
     }
 }
--- a/hotspot/test/compiler/codecache/jmx/UsageThresholdIncreasedTest.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codecache/jmx/UsageThresholdIncreasedTest.java	Thu Jul 02 14:20:36 2015 -0700
@@ -53,14 +53,12 @@
 
     public static void main(String[] args) {
         for (BlobType btype : BlobType.getAvailable()) {
-            if (CodeCacheUtils.isCodeHeapPredictable(btype)) {
-                new UsageThresholdIncreasedTest(btype).runTest();
-            }
+            new UsageThresholdIncreasedTest(btype).runTest();
         }
     }
 
     private void checkUsageThresholdCount(MemoryPoolMXBean bean, long count){
-        Asserts.assertEQ(bean.getUsageThresholdCount(), count,
+        CodeCacheUtils.assertEQorGTE(btype, bean.getUsageThresholdCount(), count,
                 String.format("Usage threshold was hit: %d times for %s "
                         + "Threshold value: %d with current usage: %d",
                         bean.getUsageThresholdCount(), bean.getName(),
--- a/hotspot/test/compiler/codecache/jmx/UsageThresholdNotExceededTest.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codecache/jmx/UsageThresholdNotExceededTest.java	Thu Jul 02 14:20:36 2015 -0700
@@ -50,9 +50,7 @@
 
     public static void main(String[] args) {
         for (BlobType btype : BlobType.getAvailable()) {
-            if (CodeCacheUtils.isCodeHeapPredictable(btype)) {
-                new UsageThresholdNotExceededTest(btype).runTest();
-            }
+            new UsageThresholdNotExceededTest(btype).runTest();
         }
     }
 
@@ -65,13 +63,11 @@
                 - CodeCacheUtils.getHeaderSize(btype), btype.id);
         // a gc cycle triggers usage threshold recalculation
         CodeCacheUtils.WB.fullGC();
-        Asserts.assertEQ(bean.getUsageThresholdCount(), initialThresholdCount,
-                String.format("Usage threshold was hit: %d  times for %s. "
+        CodeCacheUtils.assertEQorGTE(btype, bean.getUsageThresholdCount(), initialThresholdCount,
+                String.format("Usage threshold was hit: %d times for %s. "
                         + "Threshold value: %d with current usage: %d",
                         bean.getUsageThresholdCount(), bean.getName(),
                         bean.getUsageThreshold(), bean.getUsage().getUsed()));
-
-        System.out.println("INFO: Case finished successfully for "
-                + bean.getName());
+        System.out.println("INFO: Case finished successfully for " + bean.getName());
     }
 }
--- a/hotspot/test/compiler/codegen/7184394/TestAESBase.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codegen/7184394/TestAESBase.java	Thu Jul 02 14:20:36 2015 -0700
@@ -31,6 +31,7 @@
 import java.util.Random;
 import javax.crypto.Cipher;
 import javax.crypto.SecretKey;
+import javax.crypto.spec.GCMParameterSpec;
 import javax.crypto.spec.IvParameterSpec;
 import javax.crypto.spec.SecretKeySpec;
 
@@ -62,6 +63,10 @@
   Cipher dCipher;
   AlgorithmParameters algParams;
   SecretKey key;
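+  // GCM-specific state: parameter spec, additional authenticated data (AAD),
+  // tag length in bytes, and IV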
+  GCMParameterSpec gcm_spec;
+  byte[] aad;
+  int tlen = 12;
+  byte[] iv;
 
   static int numThreads = 0;
   int  threadId;
@@ -100,6 +105,12 @@
         int ivLen = (algorithm.equals("AES") ? 16 : algorithm.equals("DES") ? 8 : 0);
         IvParameterSpec initVector = new IvParameterSpec(new byte[ivLen]);
         cipher.init(Cipher.ENCRYPT_MODE, key, initVector);
+      } else if (mode.equals("GCM")) {
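+          // GCM setup: a random IV plus random AAD; gcm_init() builds the
+          // parameter spec and feeds the AAD to the cipher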
+          iv = new byte[64];
+          random.nextBytes(iv);
+          aad = new byte[5];
+          random.nextBytes(aad);
+          gcm_init();
       } else {
         algParams = cipher.getParameters();
         cipher.init(Cipher.ENCRYPT_MODE, key, algParams);
@@ -186,4 +197,12 @@
   }
 
   abstract void childShowCipher();
+
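+  // Builds a fresh GCM cipher: 96-bit (tlen * 8) authentication tag over the
+  // random IV, with the AAD supplied before any data is encrypted.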
+  void gcm_init() throws Exception {
+    tlen = 12;
+    gcm_spec = new GCMParameterSpec(tlen * 8, iv);
+    cipher = Cipher.getInstance(algorithm + "/" + mode + "/" + paddingStr, "SunJCE");
+    cipher.init(Cipher.ENCRYPT_MODE, key, gcm_spec);
+    cipher.updateAAD(aad);
+  }
 }
--- a/hotspot/test/compiler/codegen/7184394/TestAESEncode.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codegen/7184394/TestAESEncode.java	Thu Jul 02 14:20:36 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,11 @@
   @Override
   public void run() {
     try {
-      if (!noReinit) cipher.init(Cipher.ENCRYPT_MODE, key, algParams);
+      if (mode.equals("GCM")) {
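+        // A GCM cipher cannot be re-initialized with the same key/IV pair;
+        // rebuild it and re-feed the AAD on every run.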
+        gcm_init();
+      } else if (!noReinit) {
+        cipher.init(Cipher.ENCRYPT_MODE, key, algParams);
+      }
       encode = new byte[encodeLength];
       if (testingMisalignment) {
         int tempSize = cipher.update(input, encInputOffset, (msgSize - lastChunkSize), encode, encOutputOffset);
--- a/hotspot/test/compiler/codegen/7184394/TestAESMain.java	Thu Jul 02 18:07:42 2015 +0200
+++ b/hotspot/test/compiler/codegen/7184394/TestAESMain.java	Thu Jul 02 14:20:36 2015 -0700
@@ -44,6 +44,13 @@
  * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencInputOffset=1 -DencOutputOffset=1 TestAESMain
  * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 TestAESMain
  * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 -DpaddingStr=NoPadding -DmsgSize=640 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DencInputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DencOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DdecOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DencInputOffset=1 -DencOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 TestAESMain
+ * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=GCM -DencInputOffset=1 -DencOutputOffset=1 -DdecOutputOffset=1 -DpaddingStr=NoPadding -DmsgSize=640 TestAESMain
  *
  * @author Tom Deneau
  */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/intrinsics/crc32c/TestCRC32C.java	Thu Jul 02 14:20:36 2015 -0700
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8073583
+ * @summary C2 support for CRC32C on SPARC
+ *
+ * @run main/othervm/timeout=600 -Xbatch TestCRC32C -m
+ */
+
+import java.nio.ByteBuffer;
+import java.util.zip.Checksum;
+import java.util.zip.CRC32C;
+
+public class TestCRC32C {
+    public static void main(String[] args) {
+        int offset = Integer.getInteger("offset", 0);
+        int msgSize = Integer.getInteger("msgSize", 512);
+        boolean multi = false;
+        int iters = 20000;
+        int warmupIters = 20000;
+
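+        // "-m" selects the multi-case correctness test; otherwise arg0
+        // overrides iters and arg1 overrides warmupIters.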
+        if (args.length > 0) {
+            if (args[0].equals("-m")) {
+                multi = true;
+            } else {
+                iters = Integer.parseInt(args[0]);
+            }
+            if (args.length > 1) {
+                warmupIters = Integer.parseInt(args[1]);
+            }
+        }
+
+        if (multi) {
+            test_multi(warmupIters);
+            return;
+        }
+
+        System.out.println(" offset = " + offset);
+        System.out.println("msgSize = " + msgSize + " bytes");
+        System.out.println("  iters = " + iters);
+
+        byte[] b = initializedBytes(msgSize, offset);
+
+        CRC32C crc0 = new CRC32C();
+        CRC32C crc1 = new CRC32C();
+        CRC32C crc2 = new CRC32C();
+
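+        // Reference checksum, computed once before the hot loops below get JIT-compiled.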
+        crc0.update(b, offset, msgSize);
+
+        System.out.println("-------------------------------------------------------");
+
+        /* warm up */
+        for (int i = 0; i < warmupIters; i++) {
+            crc1.reset();
+            crc1.update(b, offset, msgSize);
+        }
+
+        /* measure performance */
+        long start = System.nanoTime();
+        for (int i = 0; i < iters; i++) {
+            crc1.reset();
+            crc1.update(b, offset, msgSize);
+        }
+        long end = System.nanoTime();
+        double total = (double)(end - start)/1e9;         // in seconds
+        double thruput = (double)msgSize*iters/1e6/total; // in MB/s
+        System.out.println("CRC32C.update(byte[]) runtime = " + total + " seconds");
+        System.out.println("CRC32C.update(byte[]) throughput = " + thruput + " MB/s");
+
+        /* check correctness */
+        for (int i = 0; i < iters; i++) {
+            crc1.reset();
+            crc1.update(b, offset, msgSize);
+            if (!check(crc0, crc1)) break;
+        }
+        report("CRCs", crc0, crc1);
+
+        System.out.println("-------------------------------------------------------");
+
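+        // Repeat the measurement for the direct-ByteBuffer update path.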
+        ByteBuffer buf = ByteBuffer.allocateDirect(msgSize);
+        buf.put(b, offset, msgSize);
+        buf.flip();
+
+        /* warm up */
+        for (int i = 0; i < warmupIters; i++) {
+            crc2.reset();
+            crc2.update(buf);
+            buf.rewind();
+        }
+
+        /* measure performance */
+        start = System.nanoTime();
+        for (int i = 0; i < iters; i++) {
+            crc2.reset();
+            crc2.update(buf);
+            buf.rewind();
+        }
+        end = System.nanoTime();
+        total = (double)(end - start)/1e9;         // in seconds
+        thruput = (double)msgSize*iters/1e6/total; // in MB/s
+        System.out.println("CRC32C.update(ByteBuffer) runtime = " + total + " seconds");
+        System.out.println("CRC32C.update(ByteBuffer) throughput = " + thruput + " MB/s");
+
+        /* check correctness */
+        for (int i = 0; i < iters; i++) {
+            crc2.reset();
+            crc2.update(buf);
+            buf.rewind();
+            if (!check(crc0, crc2)) break;
+        }
+        report("CRCs", crc0, crc2);
+
+        System.out.println("-------------------------------------------------------");
+    }
+
+    private static void report(String s, Checksum crc0, Checksum crc1) {
+        System.out.printf("%s: crc0 = %08x, crc1 = %08x\n",
+                          s, crc0.getValue(), crc1.getValue());
+    }
+
+    private static boolean check(Checksum crc0, Checksum crc1) {
+        if (crc0.getValue() != crc1.getValue()) {
+            System.err.printf("ERROR: crc0 = %08x, crc1 = %08x\n",
+                              crc0.getValue(), crc1.getValue());
+            return false;
+        }
+        return true;
+    }
+
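+    // Returns 'offset' leading bytes followed by M payload bytes, each region
+    // filled with a simple ascending byte pattern.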
+    private static byte[] initializedBytes(int M, int offset) {
+        byte[] bytes = new byte[M + offset];
+        for (int i = 0; i < offset; i++) {
+            bytes[i] = (byte) i;
+        }
+        for (int i = offset; i < bytes.length; i++) {
+            bytes[i] = (byte) (i - offset);
+        }
+        return bytes;
+    }
+
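+    // Correctness sweep: (offset, size) pairs chosen around the 8B/32B/4KB
+    // loop boundaries, with interpreter results as the reference.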
+    private static void test_multi(int iters) {
+        int len1 = 8;    // the  8B/iteration loop
+        int len2 = 32;   // the 32B/iteration loop
+        int len3 = 4096; // the 4KB/iteration loop
+
+        byte[] b = initializedBytes(len3*16, 0);
+        int[] offsets = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, 128, 256, 512 };
+        int[] sizes = { 0, 1, 2, 3, 4, 5, 6, 7,
+                        len1, len1+1, len1+2, len1+3, len1+4, len1+5, len1+6, len1+7,
+                        len1*2, len1*2+1, len1*2+3, len1*2+5, len1*2+7,
+                        len2, len2+1, len2+3, len2+5, len2+7,
+                        len2*2, len2*4, len2*8, len2*16, len2*32, len2*64,
+                        len3, len3+1, len3+3, len3+5, len3+7,
+                        len3*2, len3*4, len3*8,
+                        len1+len2, len1+len2+1, len1+len2+3, len1+len2+5, len1+len2+7,
+                        len1+len3, len1+len3+1, len1+len3+3, len1+len3+5, len1+len3+7,
+                        len2+len3, len2+len3+1, len2+len3+3, len2+len3+5, len2+len3+7,
+                        len1+len2+len3, len1+len2+len3+1, len1+len2+len3+3,
+                        len1+len2+len3+5, len1+len2+len3+7,
+                        (len1+len2+len3)*2, (len1+len2+len3)*2+1, (len1+len2+len3)*2+3,
+                        (len1+len2+len3)*2+5, (len1+len2+len3)*2+7,
+                        (len1+len2+len3)*3, (len1+len2+len3)*3-1, (len1+len2+len3)*3-3,
+                        (len1+len2+len3)*3-5, (len1+len2+len3)*3-7 };
+        CRC32C[] crc0 = new CRC32C[offsets.length*sizes.length];
+        CRC32C[] crc1 = new CRC32C[offsets.length*sizes.length];
+        int i, j, k;
+
+        System.out.printf("testing %d cases ...\n", offsets.length*sizes.length);
+
+        /* set the result from interpreter as reference */
+        for (i = 0; i < offsets.length; i++) {
+            for (j = 0; j < sizes.length; j++) {
+                crc0[i*sizes.length + j] = new CRC32C();
+                crc1[i*sizes.length + j] = new CRC32C();
+                crc0[i*sizes.length + j].update(b, offsets[i], sizes[j]);
+            }
+        }
+
+        /* warm up the JIT compiler and get result */
+        for (k = 0; k < iters; k++) {
+            for (i = 0; i < offsets.length; i++) {
+                for (j = 0; j < sizes.length; j++) {
+                    crc1[i*sizes.length + j].reset();
+                    crc1[i*sizes.length + j].update(b, offsets[i], sizes[j]);
+                }
+            }
+        }
+
+        /* check correctness */
+        for (i = 0; i < offsets.length; i++) {
+            for (j = 0; j < sizes.length; j++) {
+                if (!check(crc0[i*sizes.length + j], crc1[i*sizes.length + j])) {
+                    System.out.printf("offsets[%d] = %d", i, offsets[i]);
+                    System.out.printf("\tsizes[%d] = %d\n", j, sizes[j]);
+                }
+            }
+        }
+    }
+}