8154495: SHA256 AVX2 intrinsic (when no supports_sha() available)
author jcivlin
Tue, 26 Apr 2016 21:54:21 -0700
changeset 38135 e06e2d071465
parent 38134 7435f311b441
child 38136 e11f9d894322
8154495: SHA256 AVX2 intrinsic (when no supports_sha() available) Reviewed-by: kvn
hotspot/src/cpu/x86/vm/assembler_x86.cpp
hotspot/src/cpu/x86/vm/assembler_x86.hpp
hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp
hotspot/src/cpu/x86/vm/macroAssembler_x86_sha.cpp
hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
hotspot/src/cpu/x86/vm/stubRoutines_x86.cpp
hotspot/src/cpu/x86/vm/stubRoutines_x86.hpp
hotspot/src/cpu/x86/vm/vm_version_x86.cpp
hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedX86CPU.java
hotspot/test/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java
hotspot/test/testlibrary/jdk/test/lib/cli/predicate/CPUSpecificPredicate.java
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Tue Apr 26 21:54:21 2016 -0700
@@ -3211,6 +3211,16 @@
   emit_int8(imm8);
 }
 
+void Assembler::vperm2i128(XMMRegister dst,  XMMRegister nds, XMMRegister src, int imm8) {
+  assert(VM_Version::supports_avx2(), "");
+  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8(0x46);
+  emit_int8(0xC0 | encode);
+  emit_int8(imm8);
+}
+
+
 void Assembler::pause() {
   emit_int8((unsigned char)0xF3);
   emit_int8((unsigned char)0x90);
@@ -3679,6 +3689,16 @@
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
+void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
+         vector_len == AVX_256bit? VM_Version::supports_avx2() :
+         0, "");
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+  emit_int8(0x00);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
 void Assembler::pshufb(XMMRegister dst, Address src) {
   assert(VM_Version::supports_ssse3(), "");
   InstructionMark im(this);
@@ -3700,6 +3720,18 @@
   emit_int8(mode & 0xFF);
 }
 
+void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
+  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
+         vector_len == AVX_256bit? VM_Version::supports_avx2() :
+         0, "");
+  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+  emit_int8(0x70);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(mode & 0xFF);
+}
+
 void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
   assert(isByte(mode), "invalid value");
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -3740,7 +3772,6 @@
   // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
-  // XMM3 is for /3 encoding: 66 0F 73 /3 ib
   int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x73);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4023,6 +4054,17 @@
   emit_int8(imm8);
 }
 
+void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
+  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
+         vector_len == AVX_256bit? VM_Version::supports_avx2() :
+         0, "");
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
+  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8((unsigned char)0x0F);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8);
+}
+
 void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
   assert(VM_Version::supports_sse4_1(), "");
   InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
@@ -8305,6 +8347,15 @@
   emit_int8(imm8);
 }
 
+void Assembler::rorxd(Register dst, Register src, int imm8) {
+  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8((unsigned char)0xF0);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8);
+}
+
 void Assembler::sarq(Register dst, int imm8) {
   assert(isShiftCount(imm8 >> 1), "illegal shift count");
   int encode = prefixq_and_encode(dst->encoding());
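A note on the new emitters above: vperm2i128, vpshufb, vpshufd and vpalignr are the three-operand AVX/AVX2 forms of existing SSE shuffles used by the message scheduler, and rorxd is the 32-bit form of BMI2 RORX, a rotate-right-by-immediate that writes only its destination and leaves RFLAGS untouched, which is why the SHA-256 rounds can interleave it freely with flag-producing adds. A minimal scalar sketch of what rorxd computes (an illustration, not VM code):

  #include <cstdint>

  // Scalar equivalent of `rorxd dst, src, imm8`: rotate src right by imm8,
  // write dst, leave src and the flags untouched.
  static inline uint32_t rorx32(uint32_t src, unsigned imm8) {
    imm8 &= 31;
    return (src >> imm8) | (src << ((32 - imm8) & 31));
  }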
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Tue Apr 26 21:54:21 2016 -0700
@@ -1522,6 +1522,7 @@
  // Permutation of 64bit words
   void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
   void vpermq(XMMRegister dst, XMMRegister src, int imm8);
+  void vperm2i128(XMMRegister dst,  XMMRegister nds, XMMRegister src, int imm8);
 
   void pause();
 
@@ -1606,10 +1607,12 @@
   // Shuffle Bytes
   void pshufb(XMMRegister dst, XMMRegister src);
   void pshufb(XMMRegister dst, Address src);
+  void vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
 
   // Shuffle Packed Doublewords
   void pshufd(XMMRegister dst, XMMRegister src, int mode);
   void pshufd(XMMRegister dst, Address src,     int mode);
+  void vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len);
 
   // Shuffle Packed Low Words
   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
@@ -1661,6 +1664,7 @@
 #ifdef _LP64
   void rorq(Register dst, int imm8);
   void rorxq(Register dst, Register src, int imm8);
+  void rorxd(Register dst, Register src, int imm8);
 #endif
 
   void sahf();
@@ -1684,6 +1688,8 @@
   void setb(Condition cc, Register dst);
 
   void palignr(XMMRegister dst, XMMRegister src, int imm8);
+  void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, int imm8, int vector_len);
+
   void pblendw(XMMRegister dst, XMMRegister src, int imm8);
 
   void sha1rnds4(XMMRegister dst, XMMRegister src, int imm8);
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Apr 26 21:54:21 2016 -0700
@@ -906,6 +906,45 @@
   void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
   void ldmxcsr(AddressLiteral src);
 
+#ifdef _LP64
+ private:
+  void sha256_AVX2_one_round_compute(
+    Register  reg_old_h,
+    Register  reg_a,
+    Register  reg_b,
+    Register  reg_c,
+    Register  reg_d,
+    Register  reg_e,
+    Register  reg_f,
+    Register  reg_g,
+    Register  reg_h,
+    int iter);
+  void sha256_AVX2_four_rounds_compute_first(int start);
+  void sha256_AVX2_four_rounds_compute_last(int start);
+  void sha256_AVX2_one_round_and_sched(
+        XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
+        XMMRegister xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
+        XMMRegister xmm_2,     /* ymm6 */
+        XMMRegister xmm_3,     /* ymm7 */
+        Register    reg_a,      /* == eax on 0 iteration, then rotate 8 registers right on each next iteration */
+        Register    reg_b,      /* ebx */    /* full cycle is 8 iterations */
+        Register    reg_c,      /* edi */
+        Register    reg_d,      /* esi */
+        Register    reg_e,      /* r8d */
+        Register    reg_f,      /* r9d */
+        Register    reg_g,      /* r10d */
+        Register    reg_h,      /* r11d */
+        int iter);
+
+  void addm(int disp, Register r1, Register r2);
+
+ public:
+  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
+                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
+                   Register buf, Register state, Register ofs, Register limit, Register rsp,
+                   bool multi_block, XMMRegister shuf_mask);
+#endif
+
   void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                  XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                  Register buf, Register state, Register ofs, Register limit, Register rsp,
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86_sha.cpp	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86_sha.cpp	Tue Apr 26 21:54:21 2016 -0700
@@ -493,3 +493,543 @@
   bind(done_hash);
 
 }
+
+#ifdef _LP64
+/*
+  The algorithm below is based on Intel publication:
+  "Fast SHA-256 Implementations on Intelë Architecture Processors" by Jim Guilford, Kirk Yap and Vinodh Gopal.
+  The assembly code was originally provided by Sean Gulley and in many places preserves
+  the original assembly NAMES and comments to simplify matching the generated assembly with the original.
+  The HotSpot version was substantially redesigned to replace 1200 assembly instructions with
+  a much shorter run-time generator that emits the same code in memory.
+*/
+
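The round helpers below emit the textbook SHA-256 round, with some additions deferred to the following iteration and the comments naming the intermediate quantities (S0, S1, CH, MAJ, t1). For orientation, a scalar sketch of one round per FIPS 180-4 (an illustration only, not the generated code; the assembly computes CH as ((f^g)&e)^g, an equivalent form of (e&f)^(~e&g)):

  #include <cstdint>

  static inline uint32_t rotr(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

  // One SHA-256 round over the working variables a..h (scalar sketch).
  static void sha256_round(uint32_t s[8], uint32_t k_plus_w) {   // k[t] + w[t]
    uint32_t a=s[0], b=s[1], c=s[2], d=s[3], e=s[4], f=s[5], g=s[6], h=s[7];
    uint32_t S1  = rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25);  // "S1" in the comments
    uint32_t CH  = (e & f) ^ (~e & g);                      // "CH"
    uint32_t S0  = rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22);  // "S0"
    uint32_t MAJ = (a & b) ^ (a & c) ^ (b & c);             // "MAJ"
    uint32_t t1  = h + S1 + CH + k_plus_w;
    uint32_t t2  = S0 + MAJ;
    // rotate the eight variables right and fold in t1/t2
    s[7]=g; s[6]=f; s[5]=e; s[4]=d+t1; s[3]=c; s[2]=b; s[1]=a; s[0]=t1+t2;
  }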
+void MacroAssembler::sha256_AVX2_one_round_compute(
+    Register  reg_old_h,
+    Register  reg_a,
+    Register  reg_b,
+    Register  reg_c,
+    Register  reg_d,
+    Register  reg_e,
+    Register  reg_f,
+    Register  reg_g,
+    Register  reg_h,
+    int iter) {
+  const Register& reg_y0     = r13;
+  const Register& reg_y1     = r14;
+  const Register& reg_y2     = r15;
+  const Register& reg_y3     = rcx;
+  const Register& reg_T1     = r12;
+  //;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND iter ;;;;;;;;;;;;;;;;;;;;;;;;;;;
+  if (iter%4 > 0) {
+    addl(reg_old_h, reg_y2);   // reg_h = k + w + reg_h + S0 + S1 + CH = t1 + S0; --
+  }
+  movl(reg_y2, reg_f);         // reg_y2 = reg_f                                ; CH
+  rorxd(reg_y0, reg_e, 25);    // reg_y0 = reg_e >> 25   ; S1A
+  rorxd(reg_y1, reg_e, 11);    // reg_y1 = reg_e >> 11    ; S1B
+  xorl(reg_y2, reg_g);         // reg_y2 = reg_f^reg_g                              ; CH
+
+  xorl(reg_y0, reg_y1);        // reg_y0 = (reg_e>>25) ^ (reg_e>>11)  ; S1
+  rorxd(reg_y1, reg_e, 6);     // reg_y1 = (reg_e >> 6)    ; S1
+  andl(reg_y2, reg_e);         // reg_y2 = (reg_f^reg_g)&reg_e                          ; CH
+
+  if (iter%4 > 0) {
+    addl(reg_old_h, reg_y3);   // reg_h = t1 + S0 + MAJ                     ; --
+  }
+
+  xorl(reg_y0, reg_y1);       // reg_y0 = (reg_e>>25) ^ (reg_e>>11) ^ (reg_e>>6) ; S1
+  rorxd(reg_T1, reg_a, 13);   // reg_T1 = reg_a >> 13    ; S0B
+  xorl(reg_y2, reg_g);        // reg_y2 = CH = ((reg_f^reg_g)&reg_e)^reg_g                 ; CH
+  rorxd(reg_y1, reg_a, 22);   // reg_y1 = reg_a >> 22    ; S0A
+  movl(reg_y3, reg_a);        // reg_y3 = reg_a                                ; MAJA
+
+  xorl(reg_y1, reg_T1);       // reg_y1 = (reg_a>>22) ^ (reg_a>>13)  ; S0
+  rorxd(reg_T1, reg_a, 2);    // reg_T1 = (reg_a >> 2)    ; S0
+  addl(reg_h, Address(rsp, rdx, Address::times_1, 4*iter)); // reg_h = k + w + reg_h ; --
+  orl(reg_y3, reg_c);         // reg_y3 = reg_a|reg_c                              ; MAJA
+
+  xorl(reg_y1, reg_T1);       // reg_y1 = (reg_a>>22) ^ (reg_a>>13) ^ (reg_a>>2) ; S0
+  movl(reg_T1, reg_a);        // reg_T1 = reg_a                                ; MAJB
+  andl(reg_y3, reg_b);        // reg_y3 = (reg_a|reg_c)&reg_b                          ; MAJA
+  andl(reg_T1, reg_c);        // reg_T1 = reg_a&reg_c                              ; MAJB
+  addl(reg_y2, reg_y0);       // reg_y2 = S1 + CH                          ; --
+
+
+  addl(reg_d, reg_h);         // reg_d = k + w + reg_h + reg_d                     ; --
+  orl(reg_y3, reg_T1);        // reg_y3 = MAJ = (reg_a|reg_c)&reg_b)|(reg_a&reg_c)             ; MAJ
+  addl(reg_h, reg_y1);        // reg_h = k + w + reg_h + S0                    ; --
+
+  addl(reg_d, reg_y2);        // reg_d = k + w + reg_h + reg_d + S1 + CH = reg_d + t1  ; --
+
+
+  if (iter%4 == 3) {
+    addl(reg_h, reg_y2);      // reg_h = k + w + reg_h + S0 + S1 + CH = t1 + S0; --
+    addl(reg_h, reg_y3);      // reg_h = t1 + S0 + MAJ                     ; --
+  }
+}
+
+void MacroAssembler::sha256_AVX2_four_rounds_compute_first(int start) {
+    sha256_AVX2_one_round_compute(rax, rax, rbx, rdi, rsi,  r8,  r9, r10, r11, start + 0);
+    sha256_AVX2_one_round_compute(r11, r11, rax, rbx, rdi, rsi,  r8,  r9, r10, start + 1);
+    sha256_AVX2_one_round_compute(r10, r10, r11, rax, rbx, rdi, rsi,  r8,  r9, start + 2);
+    sha256_AVX2_one_round_compute(r9,  r9,  r10, r11, rax, rbx, rdi, rsi,  r8, start + 3);
+}
+
+void MacroAssembler::sha256_AVX2_four_rounds_compute_last(int start) {
+    sha256_AVX2_one_round_compute(r8,  r8,   r9, r10, r11, rax, rbx, rdi, rsi, start + 0);
+    sha256_AVX2_one_round_compute(rsi, rsi,  r8,  r9, r10, r11, rax, rbx, rdi, start + 1);
+    sha256_AVX2_one_round_compute(rdi, rdi, rsi,  r8,  r9, r10, r11, rax, rbx, start + 2);
+    sha256_AVX2_one_round_compute(rbx, rbx, rdi, rsi,  r8,  r9, r10, r11, rax, start + 3);
+}
+
+void MacroAssembler::sha256_AVX2_one_round_and_sched(
+        XMMRegister  xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
+        XMMRegister  xmm_1,     /* ymm5 */  /* full cycle is 16 iterations */
+        XMMRegister  xmm_2,     /* ymm6 */
+        XMMRegister  xmm_3,     /* ymm7 */
+        Register  reg_a,        /* == rax on 0 iteration, then rotate 8 registers right on each next iteration */
+        Register  reg_b,        /* rbx */    /* full cycle is 8 iterations */
+        Register  reg_c,        /* rdi */
+        Register  reg_d,        /* rsi */
+        Register  reg_e,        /* r8 */
+        Register  reg_f,        /* r9d */
+        Register  reg_g,        /* r10d */
+        Register  reg_h,        /* r11d */
+        int iter)
+{
+  movl(rcx, reg_a);           // rcx = reg_a               ; MAJA
+  rorxd(r13, reg_e, 25);      // r13 = reg_e >> 25    ; S1A
+  rorxd(r14, reg_e, 11);      //  r14 = reg_e >> 11    ; S1B
+  addl(reg_h, Address(rsp, rdx, Address::times_1, 4*iter));
+  orl(rcx, reg_c);            // rcx = reg_a|reg_c          ; MAJA
+
+  movl(r15, reg_f);           // r15 = reg_f               ; CH
+  rorxd(r12, reg_a, 13);      // r12 = reg_a >> 13      ; S0B
+  xorl(r13, r14);             // r13 = (reg_e>>25) ^ (reg_e>>11)  ; S1
+  xorl(r15, reg_g);           // r15 = reg_f^reg_g         ; CH
+
+  rorxd(r14, reg_e, 6);       // r14 = (reg_e >> 6)    ; S1
+  andl(r15, reg_e);           // r15 = (reg_f^reg_g)&reg_e ; CH
+
+  xorl(r13, r14);             // r13 = (reg_e>>25) ^ (reg_e>>11) ^ (reg_e>>6) ; S1
+  rorxd(r14, reg_a, 22);      // r14 = reg_a >> 22    ; S0A
+  addl(reg_d, reg_h);         // reg_d = k + w + reg_h + reg_d                     ; --
+
+  andl(rcx, reg_b);          // rcx = (reg_a|reg_c)&reg_b                          ; MAJA
+  xorl(r14, r12);            // r14 = (reg_a>>22) ^ (reg_a>>13)  ; S0
+
+  rorxd(r12, reg_a, 2);      // r12 = (reg_a >> 2)    ; S0
+  xorl(r15, reg_g);          // r15 = CH = ((reg_f^reg_g)&reg_e)^reg_g                 ; CH
+
+  xorl(r14, r12);            // r14 = (reg_a>>22) ^ (reg_a>>13) ^ (reg_a>>2) ; S0
+  movl(r12, reg_a);          // r12 = reg_a                                ; MAJB
+  andl(r12, reg_c);          // r12 = reg_a&reg_c                              ; MAJB
+  addl(r15, r13);            // r15 = S1 + CH                          ; --
+
+  orl(rcx, r12);             // rcx = MAJ = (reg_a|reg_c)&reg_b)|(reg_a&reg_c)             ; MAJ
+  addl(reg_h, r14);          // reg_h = k + w + reg_h + S0                    ; --
+  addl(reg_d, r15);          // reg_d = k + w + reg_h + reg_d + S1 + CH = reg_d + t1  ; --
+
+  addl(reg_h, r15);          // reg_h = k + w + reg_h + S0 + S1 + CH = t1 + S0; --
+  addl(reg_h, rcx);          // reg_h = t1 + S0 + MAJ                     ; --
+
+  if (iter%4 == 0) {
+    vpalignr(xmm0, xmm_3, xmm_2, 4, AVX_256bit);   // ymm0 = W[-7]
+    vpaddd(xmm0, xmm0, xmm_0, AVX_256bit);         // ymm0 = W[-7] + W[-16]; y1 = (e >> 6)     ; S1
+    vpalignr(xmm1, xmm_1, xmm_0, 4, AVX_256bit);   // ymm1 = W[-15]
+    vpsrld(xmm2, xmm1, 7, AVX_256bit);
+    vpslld(xmm3, xmm1, 32-7, AVX_256bit);
+    vpor(xmm3, xmm3, xmm2, AVX_256bit);            // ymm3 = W[-15] ror 7
+    vpsrld(xmm2, xmm1,18, AVX_256bit);
+  } else if (iter%4 == 1 ) {
+    vpsrld(xmm8, xmm1, 3, AVX_256bit);             // ymm8 = W[-15] >> 3
+    vpslld(xmm1, xmm1, 32-18, AVX_256bit);
+    vpxor(xmm3, xmm3, xmm1, AVX_256bit);
+    vpxor(xmm3, xmm3, xmm2, AVX_256bit);           // ymm3 = W[-15] ror 7 ^ W[-15] ror 18
+    vpxor(xmm1, xmm3, xmm8, AVX_256bit);           // ymm1 = s0
+    vpshufd(xmm2, xmm_3, 0xFA, AVX_256bit);        // 11111010b ; ymm2 = W[-2] {BBAA}
+    vpaddd(xmm0, xmm0, xmm1, AVX_256bit);          // ymm0 = W[-16] + W[-7] + s0
+    vpsrld(xmm8, xmm2, 10, AVX_256bit);            // ymm8 = W[-2] >> 10 {BBAA}
+  } else if (iter%4 == 2) {
+    vpsrlq(xmm3, xmm2, 19, AVX_256bit);            // ymm3 = W[-2] ror 19 {xBxA}
+    vpsrlq(xmm2, xmm2, 17, AVX_256bit);            // ymm2 = W[-2] ror 17 {xBxA}
+    vpxor(xmm2, xmm2, xmm3, AVX_256bit);
+    vpxor(xmm8, xmm8, xmm2, AVX_256bit);           // ymm8 = s1 {xBxA}
+    vpshufb(xmm8, xmm8, xmm10, AVX_256bit);        // ymm8 = s1 {00BA}
+    vpaddd(xmm0, xmm0, xmm8, AVX_256bit);          // ymm0 = {..., ..., W[1], W[0]}
+    vpshufd(xmm2, xmm0, 0x50, AVX_256bit);         // 01010000b ; ymm2 = W[-2] {DDCC}
+  } else if (iter%4 == 3) {
+    vpsrld(xmm11, xmm2, 10, AVX_256bit);           // ymm11 = W[-2] >> 10 {DDCC}
+    vpsrlq(xmm3, xmm2, 19, AVX_256bit);            // ymm3 = W[-2] ror 19 {xDxC}
+    vpsrlq(xmm2, xmm2, 17, AVX_256bit);            // ymm2 = W[-2] ror 17 {xDxC}
+    vpxor(xmm2, xmm2, xmm3, AVX_256bit);
+    vpxor(xmm11, xmm11, xmm2, AVX_256bit);         // ymm11 = s1 {xDxC}
+    vpshufb(xmm11, xmm11, xmm12, AVX_256bit);      // ymm11 = s1 {DC00}
+    vpaddd(xmm_0, xmm11, xmm0, AVX_256bit);        // xmm_0 = {W[3], W[2], W[1], W[0]}
+  }
+}
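The vector tail of the function above interleaves the SHA-256 message schedule with the scalar rounds, producing four new W values every four rounds and evaluating them for two blocks at once, one block per 128-bit lane. The scalar recurrence it implements is, as a sketch (illustration only, not VM code):

  #include <cstdint>

  static inline uint32_t rotr(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

  // Scalar form of the schedule computed by the vpalignr/vpsrld/vpslld/vpsrlq
  // sequence above.
  static void sha256_schedule(uint32_t w[64]) {
    for (int t = 16; t < 64; ++t) {
      uint32_t s0 = rotr(w[t-15], 7) ^ rotr(w[t-15], 18) ^ (w[t-15] >> 3);
      uint32_t s1 = rotr(w[t-2], 17) ^ rotr(w[t-2], 19)  ^ (w[t-2] >> 10);
      w[t] = w[t-16] + s0 + w[t-7] + s1;
    }
  }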
+
+void MacroAssembler::addm(int disp, Register r1, Register r2) {
+  addl(r2, Address(r1, disp));
+  movl(Address(r1, disp), r2);
+}
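addm is the small read-modify-write used at the end of each block to fold the working variables back into the hash state; in C terms (sketch, illustration only):

  #include <cstdint>

  // Equivalent of addm(disp, r1, r2): add the state word at [r1 + disp] into r2,
  // then store the sum back to memory (r2 keeps the updated value as well).
  static inline void addm(uint32_t* state_word, uint32_t& r2) {
    r2 += *state_word;
    *state_word = r2;
  }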
+
+void MacroAssembler::sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
+  XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
+  Register buf, Register state, Register ofs, Register limit, Register rsp,
+  bool multi_block, XMMRegister shuf_mask) {
+
+  Label loop0, loop1, loop2, loop3,
+        last_block_enter, do_last_block, only_one_block, done_hash,
+        compute_size, compute_size_end,
+        compute_size1, compute_size_end1;
+
+  address K256_W = StubRoutines::x86::k256_W_addr();
+  address pshuffle_byte_flip_mask = StubRoutines::x86::pshuffle_byte_flip_mask_addr();
+  address pshuffle_byte_flip_mask_addr = 0;
+
+const XMMRegister& SHUF_00BA        = xmm10;    // ymm10: shuffle xBxA -> 00BA
+const XMMRegister& SHUF_DC00        = xmm12;    // ymm12: shuffle xDxC -> DC00
+const XMMRegister& BYTE_FLIP_MASK   = xmm13;   // ymm13
+
+const XMMRegister& X_BYTE_FLIP_MASK = xmm13;   //XMM version of BYTE_FLIP_MASK
+
+const Register& NUM_BLKS = r8;   // 3rd arg
+const Register& CTX      = rdx;  // 2nd arg
+const Register& INP      = rcx;  // 1st arg
+
+const Register& c        = rdi;
+const Register& d        = rsi;
+const Register& e        = r8;    // clobbers NUM_BLKS
+const Register& y3       = rcx;  // clobbers INP
+
+const Register& TBL      = rbp;
+const Register& SRND     = CTX;   // SRND is same register as CTX
+
+const Register& a        = rax;
+const Register& b        = rbx;
+const Register& f        = r9;
+const Register& g        = r10;
+const Register& h        = r11;
+
+const Register& T1       = r12;
+const Register& y0       = r13;
+const Register& y1       = r14;
+const Register& y2       = r15;
+
+
+enum {
+  _XFER_SIZE = 2*64*4, // 2 blocks, 64 rounds, 4 bytes/round
+#ifndef _WIN64
+  _XMM_SAVE_SIZE = 0,
+#else
+  _XMM_SAVE_SIZE = 8*16,
+#endif
+  _INP_END_SIZE = 8,
+  _INP_SIZE = 8,
+  _CTX_SIZE = 8,
+  _RSP_SIZE = 8,
+
+  _XFER = 0,
+  _XMM_SAVE  = _XFER     + _XFER_SIZE,
+  _INP_END   = _XMM_SAVE + _XMM_SAVE_SIZE,
+  _INP       = _INP_END  + _INP_END_SIZE,
+  _CTX       = _INP      + _INP_SIZE,
+  _RSP       = _CTX      + _CTX_SIZE,
+  STACK_SIZE = _RSP      + _RSP_SIZE
+};
+
+#ifndef _WIN64
+  push(rcx);    // linux: this is limit, need at the end
+  push(rdx);    // linux: this is ofs
+#else
+  push(r8);     // win64: this is ofs
+  push(r9);     // win64: this is limit, we need them again at the very end
+#endif
+
+
+  push(rbx);
+#ifdef _WIN64
+  push(rsi);
+  push(rdi);
+#endif
+  push(rbp);
+  push(r12);
+  push(r13);
+  push(r14);
+  push(r15);
+
+  movq(rax, rsp);
+  subq(rsp, STACK_SIZE);
+  andq(rsp, -32);
+  movq(Address(rsp, _RSP), rax);
+
+#ifndef _WIN64
+  // copy linux params to win64 params, so the rest of the code is the same for both
+  movq(r9,  rcx);
+  movq(r8,  rdx);
+  movq(rdx, rsi);
+  movq(rcx, rdi);
+#endif
+
+  // setting original assembly ABI
+  /** message to encrypt in INP */
+  lea(INP, Address(rcx, 0));    // rcx == message (buf)     ;; linux: INP = buf = rdi
+  /** digest in CTX             */
+  movq(CTX, rdx);               // rdx = digest  (state)    ;; linux: CTX = state = rsi
+
+  /** NUM_BLK is the length of message, need to set it from ofs and limit  */
+  if (multi_block) {
+
+    // Win64: cannot directly update NUM_BLKS, since NUM_BLKS = ofs = r8
+    // on entry r8 = ofs
+    // on exit  r8 = NUM_BLKS
+
+    xorq(rax, rax);
+
+    bind(compute_size);
+    cmpptr(r8, r9); // assume the original ofs <= limit ;; linux:  cmp rcx, rdx
+    jccb(Assembler::aboveEqual, compute_size_end);
+    addq(r8, 64);                                          //;; linux: ofs = rdx
+    addq(rax, 64);
+    jmpb(compute_size);
+
+    bind(compute_size_end);
+    movq(NUM_BLKS, rax);  // NUM_BLK (r8)                  ;; linux: NUM_BLK = rdx
+
+    cmpq(NUM_BLKS, 0);
+    jcc(Assembler::equal, done_hash);
+
+    } else {
+    xorq(NUM_BLKS, NUM_BLKS);
+    addq(NUM_BLKS, 64);
+  }//if (!multi_block)
+
+  lea(NUM_BLKS, Address(INP, NUM_BLKS, Address::times_1, -64)); // pointer to the last block
+  movq(Address(rsp, _INP_END), NUM_BLKS);  //
+
+  cmpptr(INP, NUM_BLKS);                   //cmp INP, NUM_BLKS
+  jcc(Assembler::equal, only_one_block);   //je only_one_block
+
+  // load initial digest
+  movl(a, Address(CTX, 4*0));
+  movl(b, Address(CTX, 4*1));
+  movl(c, Address(CTX, 4*2));
+  movl(d, Address(CTX, 4*3));
+  movl(e, Address(CTX, 4*4));
+  movl(f, Address(CTX, 4*5));
+  movl(g, Address(CTX, 4*6));
+  movl(h, Address(CTX, 4*7));
+
+  pshuffle_byte_flip_mask_addr = pshuffle_byte_flip_mask;
+  vmovdqu(BYTE_FLIP_MASK, ExternalAddress(pshuffle_byte_flip_mask_addr +0)); //[PSHUFFLE_BYTE_FLIP_MASK wrt rip]
+  vmovdqu(SHUF_00BA, ExternalAddress(pshuffle_byte_flip_mask_addr + 32));     //[_SHUF_00BA wrt rip]
+  vmovdqu(SHUF_DC00, ExternalAddress(pshuffle_byte_flip_mask_addr + 64));     //[_SHUF_DC00 wrt rip]
+
+  movq(Address(rsp, _CTX), CTX);           // store
+
+bind(loop0);
+  lea(TBL, ExternalAddress(K256_W));
+
+  // assume buffers not aligned
+
+  // Load first 16 dwords from two blocks
+  vmovdqu(xmm0, Address(INP, 0*32));
+  vmovdqu(xmm1, Address(INP, 1*32));
+  vmovdqu(xmm2, Address(INP, 2*32));
+  vmovdqu(xmm3, Address(INP, 3*32));
+
+  // byte swap data
+  vpshufb(xmm0, xmm0, BYTE_FLIP_MASK, AVX_256bit);
+  vpshufb(xmm1, xmm1, BYTE_FLIP_MASK, AVX_256bit);
+  vpshufb(xmm2, xmm2, BYTE_FLIP_MASK, AVX_256bit);
+  vpshufb(xmm3, xmm3, BYTE_FLIP_MASK, AVX_256bit);
+
+  // transpose data into high/low halves
+  vperm2i128(xmm4, xmm0, xmm2, 0x20);
+  vperm2i128(xmm5, xmm0, xmm2, 0x31);
+  vperm2i128(xmm6, xmm1, xmm3, 0x20);
+  vperm2i128(xmm7, xmm1, xmm3, 0x31);
+
+bind(last_block_enter);
+  addq(INP, 64);
+  movq(Address(rsp, _INP), INP);
+
+  //;; schedule 48 input dwords, by doing 3 rounds of 12 each
+  xorq(SRND, SRND);
+
+align(16);
+bind(loop1);
+  vpaddd(xmm9, xmm4, Address(TBL, SRND, Address::times_1, 0*32), AVX_256bit);
+  vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 0*32), xmm9);
+  sha256_AVX2_one_round_and_sched(xmm4, xmm5, xmm6, xmm7, rax, rbx, rdi, rsi, r8,  r9,  r10, r11, 0);
+  sha256_AVX2_one_round_and_sched(xmm4, xmm5, xmm6, xmm7, r11, rax, rbx, rdi, rsi, r8,  r9,  r10, 1);
+  sha256_AVX2_one_round_and_sched(xmm4, xmm5, xmm6, xmm7, r10, r11, rax, rbx, rdi, rsi, r8,  r9,  2);
+  sha256_AVX2_one_round_and_sched(xmm4, xmm5, xmm6, xmm7, r9,  r10, r11, rax, rbx, rdi, rsi, r8,  3);
+
+  vpaddd(xmm9, xmm5, Address(TBL, SRND, Address::times_1, 1*32), AVX_256bit);
+  vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 1*32), xmm9);
+  sha256_AVX2_one_round_and_sched(xmm5, xmm6, xmm7, xmm4, r8,  r9,  r10, r11, rax, rbx, rdi, rsi,  8+0);
+  sha256_AVX2_one_round_and_sched(xmm5, xmm6, xmm7, xmm4, rsi, r8,  r9,  r10, r11, rax, rbx, rdi,  8+1);
+  sha256_AVX2_one_round_and_sched(xmm5, xmm6, xmm7, xmm4, rdi, rsi, r8,  r9,  r10, r11, rax, rbx,  8+2);
+  sha256_AVX2_one_round_and_sched(xmm5, xmm6, xmm7, xmm4, rbx, rdi, rsi, r8,  r9,  r10, r11, rax,  8+3);
+
+  vpaddd(xmm9, xmm6, Address(TBL, SRND, Address::times_1, 2*32), AVX_256bit);
+  vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 2*32), xmm9);
+  sha256_AVX2_one_round_and_sched(xmm6, xmm7, xmm4, xmm5, rax, rbx, rdi, rsi, r8,  r9,  r10, r11, 16+0);
+  sha256_AVX2_one_round_and_sched(xmm6, xmm7, xmm4, xmm5, r11, rax, rbx, rdi, rsi, r8,  r9,  r10, 16+1);
+  sha256_AVX2_one_round_and_sched(xmm6, xmm7, xmm4, xmm5, r10, r11, rax, rbx, rdi, rsi, r8,  r9,  16+2);
+  sha256_AVX2_one_round_and_sched(xmm6, xmm7, xmm4, xmm5, r9,  r10, r11, rax, rbx, rdi, rsi, r8,  16+3);
+
+  vpaddd(xmm9, xmm7, Address(TBL, SRND, Address::times_1, 3*32), AVX_256bit);
+  vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 3*32), xmm9);
+
+  sha256_AVX2_one_round_and_sched(xmm7, xmm4, xmm5, xmm6, r8,  r9,  r10, r11, rax, rbx, rdi, rsi,  24+0);
+  sha256_AVX2_one_round_and_sched(xmm7, xmm4, xmm5, xmm6, rsi, r8,  r9,  r10, r11, rax, rbx, rdi,  24+1);
+  sha256_AVX2_one_round_and_sched(xmm7, xmm4, xmm5, xmm6, rdi, rsi, r8,  r9,  r10, r11, rax, rbx,  24+2);
+  sha256_AVX2_one_round_and_sched(xmm7, xmm4, xmm5, xmm6, rbx, rdi, rsi, r8,  r9,  r10, r11, rax,  24+3);
+
+  addq(SRND, 4*32);
+  cmpq(SRND, 3 * 4*32);
+  jcc(Assembler::below, loop1);
+
+bind(loop2);
+  // Do last 16 rounds with no scheduling
+  vpaddd(xmm9, xmm4, Address(TBL, SRND, Address::times_1, 0*32), AVX_256bit);
+  vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 0*32), xmm9);
+  sha256_AVX2_four_rounds_compute_first(0);
+
+  vpaddd(xmm9, xmm5, Address(TBL, SRND, Address::times_1, 1*32), AVX_256bit);
+  vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 1*32), xmm9);
+  sha256_AVX2_four_rounds_compute_last(0 + 8);
+
+  addq(SRND, 2*32);
+
+  vmovdqu(xmm4, xmm6);
+  vmovdqu(xmm5, xmm7);
+
+  cmpq(SRND, 4 * 4*32);
+  jcc(Assembler::below, loop2);
+
+  movq(CTX, Address(rsp, _CTX));
+  movq(INP, Address(rsp, _INP));
+
+  addm(4*0, CTX, a);
+  addm(4*1, CTX, b);
+  addm(4*2, CTX, c);
+  addm(4*3, CTX, d);
+  addm(4*4, CTX, e);
+  addm(4*5, CTX, f);
+  addm(4*6, CTX, g);
+  addm(4*7, CTX, h);
+
+  cmpq(INP, Address(rsp, _INP_END));
+  jcc(Assembler::above, done_hash);
+
+  //Do second block using previously scheduled results
+  xorq(SRND, SRND);
+align(16);
+bind(loop3);
+  sha256_AVX2_four_rounds_compute_first(4);
+  sha256_AVX2_four_rounds_compute_last(4+8);
+
+  addq(SRND, 2*32);
+  cmpq(SRND, 4 * 4*32);
+  jcc(Assembler::below, loop3);
+
+  movq(CTX, Address(rsp, _CTX));
+  movq(INP, Address(rsp, _INP));
+  addq(INP, 64);
+
+  addm(4*0, CTX, a);
+  addm(4*1, CTX, b);
+  addm(4*2, CTX, c);
+  addm(4*3, CTX, d);
+  addm(4*4, CTX, e);
+  addm(4*5, CTX, f);
+  addm(4*6, CTX, g);
+  addm(4*7, CTX, h);
+
+  cmpq(INP, Address(rsp, _INP_END));
+  jcc(Assembler::below, loop0);
+  jccb(Assembler::above, done_hash);
+
+bind(do_last_block);
+  lea(TBL, ExternalAddress(K256_W));
+
+  movdqu(xmm4, Address(INP, 0*16));
+  movdqu(xmm5, Address(INP, 1*16));
+  movdqu(xmm6, Address(INP, 2*16));
+  movdqu(xmm7, Address(INP, 3*16));
+
+  vpshufb(xmm4, xmm4, xmm13, AVX_128bit);
+  vpshufb(xmm5, xmm5, xmm13, AVX_128bit);
+  vpshufb(xmm6, xmm6, xmm13, AVX_128bit);
+  vpshufb(xmm7, xmm7, xmm13, AVX_128bit);
+
+  jmp(last_block_enter);
+
+bind(only_one_block);
+
+  // load initial digest ;; table should be preloaded with following values
+  movl(a, Address(CTX, 4*0));   // 0x6a09e667
+  movl(b, Address(CTX, 4*1));   // 0xbb67ae85
+  movl(c, Address(CTX, 4*2));   // 0x3c6ef372
+  movl(d, Address(CTX, 4*3));   // 0xa54ff53a
+  movl(e, Address(CTX, 4*4));   // 0x510e527f
+  movl(f, Address(CTX, 4*5));   // 0x9b05688c
+  movl(g, Address(CTX, 4*6));   // 0x1f83d9ab
+  movl(h, Address(CTX, 4*7));   // 0x5be0cd19
+
+
+  pshuffle_byte_flip_mask_addr = pshuffle_byte_flip_mask;
+  vmovdqu(BYTE_FLIP_MASK, ExternalAddress(pshuffle_byte_flip_mask_addr + 0)); //[PSHUFFLE_BYTE_FLIP_MASK wrt rip]
+  vmovdqu(SHUF_00BA, ExternalAddress(pshuffle_byte_flip_mask_addr + 32));     //[_SHUF_00BA wrt rip]
+  vmovdqu(SHUF_DC00, ExternalAddress(pshuffle_byte_flip_mask_addr + 64));     //[_SHUF_DC00 wrt rip]
+
+  movq(Address(rsp, _CTX), CTX);
+  jmpb(do_last_block);
+
+bind(done_hash);
+
+  movq(rsp, Address(rsp, _RSP));
+
+  pop(r15);
+  pop(r14);
+  pop(r13);
+  pop(r12);
+  pop(rbp);
+#ifdef _WIN64
+  pop(rdi);
+  pop(rsi);
+#endif
+  pop(rbx);
+
+#ifdef _WIN64
+  pop(r9);
+  pop(r8);
+#else
+  pop(rdx);
+  pop(rcx);
+#endif
+
+  if (multi_block) {
+#ifdef _WIN64
+const Register& limit_end = r9;
+const Register& ofs_end   = r8;
+#else
+const Register& limit_end = rcx;
+const Register& ofs_end   = rdx;
+#endif
+    movq(rax, ofs_end);
+
+bind(compute_size1);
+    cmpptr(rax, limit_end); // assume the original ofs <= limit
+    jccb(Assembler::aboveEqual, compute_size_end1);
+    addq(rax, 64);
+    jmpb(compute_size1);
+
+bind(compute_size_end1);
+  }
+}
+#endif //#ifdef _LP64
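Two details of the driver above are easy to miss: loop0..loop2 process two 64-byte blocks per pass (the schedule for the second block is produced as a by-product and consumed in loop3), and in the multi-block stub the byte count is recomputed from the ofs/limit arguments by the compute_size loop. A scalar sketch of that computation (illustration only, not VM code):

  #include <cstddef>

  // What the compute_size loop derives at run time from ofs/limit:
  // the number of bytes to hash, stepped in whole 64-byte SHA-256 blocks.
  static size_t sha256_bytes_to_process(size_t ofs, size_t limit) {
    size_t n = 0;
    while (ofs < limit) {   // assumes the original ofs <= limit, as the asm does
      ofs += 64;
      n   += 64;
    }
    return n;
  }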
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Apr 26 21:54:21 2016 -0700
@@ -3771,12 +3771,29 @@
     address start = __ pc();
     __ emit_data64(0x0405060700010203, relocInfo::none);
     __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
+
+    if (VM_Version::supports_avx2()) {
+      __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
+      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
+      // _SHUF_00BA
+      __ emit_data64(0x0b0a090803020100, relocInfo::none);
+      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
+      __ emit_data64(0x0b0a090803020100, relocInfo::none);
+      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
+      // _SHUF_DC00
+      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
+      __ emit_data64(0x0b0a090803020100, relocInfo::none);
+      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
+      __ emit_data64(0x0b0a090803020100, relocInfo::none);
+    }
+
     return start;
   }
 
 // ofs and limit are used for multi-block byte array.
 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
   address generate_sha256_implCompress(bool multi_block, const char *name) {
+    assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
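The qwords emitted in the hunk above are, byte for byte, the VPSHUFB control that byte-swaps each 32-bit word (SHA-256 is big-endian), followed by the _SHUF_00BA and _SHUF_DC00 controls that compact the xBxA/xDxC schedule lanes; the second copy of the flip mask makes it usable as a full 256-bit operand. The per-word effect of the flip mask is simply (sketch, illustration only):

  #include <cstdint>

  // What vpshufb with PSHUFFLE_BYTE_FLIP_MASK does to every dword of the input:
  // reverse its bytes, i.e. load the big-endian message word.
  static inline uint32_t byte_flip(uint32_t x) {
    return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
           ((x << 8) & 0x00ff0000u) | (x << 24);
  }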
@@ -3805,16 +3822,37 @@
     __ movdqu(Address(rsp, 0), xmm6);
     __ movdqu(Address(rsp, 2 * wordSize), xmm7);
     __ movdqu(Address(rsp, 4 * wordSize), xmm8);
+
+    if (!VM_Version::supports_sha() && VM_Version::supports_avx2()) {
+      __ subptr(rsp, 10 * wordSize);
+      __ movdqu(Address(rsp, 0), xmm9);
+      __ movdqu(Address(rsp, 2 * wordSize), xmm10);
+      __ movdqu(Address(rsp, 4 * wordSize), xmm11);
+      __ movdqu(Address(rsp, 6 * wordSize), xmm12);
+      __ movdqu(Address(rsp, 8 * wordSize), xmm13);
+    }
 #endif
 
     __ subptr(rsp, 4 * wordSize);
 
-    __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
-      buf, state, ofs, limit, rsp, multi_block, shuf_mask);
-
+    if (VM_Version::supports_sha()) {
+      __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
+        buf, state, ofs, limit, rsp, multi_block, shuf_mask);
+    } else if (VM_Version::supports_avx2()) {
+      __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
+        buf, state, ofs, limit, rsp, multi_block, shuf_mask);
+    }
     __ addptr(rsp, 4 * wordSize);
 #ifdef _WIN64
     // restore xmm regs belonging to calling function
+    if (!VM_Version::supports_sha() && VM_Version::supports_avx2()) {
+      __ movdqu(xmm9, Address(rsp, 0));
+      __ movdqu(xmm10, Address(rsp, 2 * wordSize));
+      __ movdqu(xmm11, Address(rsp, 4 * wordSize));
+      __ movdqu(xmm12, Address(rsp, 6 * wordSize));
+      __ movdqu(xmm13, Address(rsp, 8 * wordSize));
+      __ addptr(rsp, 10 * wordSize);
+    }
     __ movdqu(xmm6, Address(rsp, 0));
     __ movdqu(xmm7, Address(rsp, 2 * wordSize));
     __ movdqu(xmm8, Address(rsp, 4 * wordSize));
@@ -5217,6 +5255,13 @@
     }
     if (UseSHA256Intrinsics) {
       StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
+      char* dst = (char*)StubRoutines::x86::_k256_W;
+      char* src = (char*)StubRoutines::x86::_k256;
+      for (int ii = 0; ii < 16; ++ii) {
+        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
+        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
+      }
+      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
       StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
       StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
       StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
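The loop above widens the 64-entry _k256 round-constant table into _k256_W: each group of four 32-bit constants is copied into both 128-bit lanes of a 32-byte row, so a single 256-bit vpaddd adds the same round constants to both blocks being hashed. A sketch of the same construction (illustration only):

  #include <cstdint>
  #include <cstring>

  // Build the two-lane table from the 64-entry K table: row i holds
  // K[4i..4i+3] twice, once per 128-bit lane (16 rows of 32 bytes).
  static void build_k256_W(const uint32_t k256[64], uint32_t k256_W[128]) {
    for (int i = 0; i < 16; ++i) {
      std::memcpy(&k256_W[8 * i],     &k256[4 * i], 16);  // low lane
      std::memcpy(&k256_W[8 * i + 4], &k256[4 * i], 16);  // high lane (duplicate)
    }
  }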
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86.cpp	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86.cpp	Tue Apr 26 21:54:21 2016 -0700
@@ -46,6 +46,9 @@
 address StubRoutines::x86::_upper_word_mask_addr = NULL;
 address StubRoutines::x86::_shuffle_byte_flip_mask_addr = NULL;
 address StubRoutines::x86::_k256_adr = NULL;
+#ifdef _LP64
+address StubRoutines::x86::_k256_W_adr = NULL;
+#endif
 address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL;
 
 //tables common for sin and cos
@@ -289,3 +292,9 @@
     0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
     0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
 };
+
+#ifdef _LP64
+// used in MacroAssembler::sha256_AVX2
+// dynamically built from _k256
+ALIGNED_(64) juint StubRoutines::x86::_k256_W[2*sizeof(StubRoutines::x86::_k256)];
+#endif
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86.hpp	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86.hpp	Tue Apr 26 21:54:21 2016 -0700
@@ -54,6 +54,10 @@
   //k256 table for sha256
   static juint _k256[];
   static address _k256_adr;
+#ifdef _LP64
+  static juint _k256_W[];
+  static address _k256_W_adr;
+#endif
   // byte flip mask for sha256
   static address _pshuffle_byte_flip_mask_addr;
 
@@ -109,6 +113,9 @@
   static address upper_word_mask_addr() { return _upper_word_mask_addr; }
   static address shuffle_byte_flip_mask_addr() { return _shuffle_byte_flip_mask_addr; }
   static address k256_addr()      { return _k256_adr; }
+#ifdef _LP64
+  static address k256_W_addr()    { return _k256_W_adr; }
+#endif
   static address pshuffle_byte_flip_mask_addr() { return _pshuffle_byte_flip_mask_addr; }
   static void generate_CRC32C_table(bool is_pclmulqdq_supported);
   static address _ONEHALF_addr()      { return _ONEHALF_adr; }
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Tue Apr 26 21:54:21 2016 -0700
@@ -732,7 +732,7 @@
     FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
   }
 
-  if (supports_sha()) {
+  if (supports_sha() LP64_ONLY(|| supports_avx2() && supports_bmi2())) {
     if (FLAG_IS_DEFAULT(UseSHA)) {
       UseSHA = true;
     }
@@ -741,7 +741,7 @@
     FLAG_SET_DEFAULT(UseSHA, false);
   }
 
-  if (UseSHA) {
+  if (supports_sha() && UseSHA) {
     if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
       FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
     }
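Net effect of the vm_version change above: UseSHA can now default to on when the CPU lacks the SHA extensions but has AVX2 and BMI2, while the block that turns on UseSHA1Intrinsics by default stays gated on supports_sha(), since only the SHA-256 path has an AVX2 fallback. Note that && binds tighter than ||, so the new condition groups as sketched below (illustration only, not the VM code):

  // How the new UseSHA gate groups on 64-bit x86 ('&&' binds tighter than '||').
  static bool sha256_stub_possible(bool cpu_sha, bool cpu_avx2, bool cpu_bmi2) {
    return cpu_sha || (cpu_avx2 && cpu_bmi2);
  }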
--- a/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedX86CPU.java	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedX86CPU.java	Tue Apr 26 21:54:21 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,9 @@
 import jdk.test.lib.ExitCode;
 import jdk.test.lib.Platform;
 import jdk.test.lib.cli.CommandLineOptionTest;
+import jdk.test.lib.cli.predicate.AndPredicate;
 import jdk.test.lib.cli.predicate.OrPredicate;
+import jdk.test.lib.cli.predicate.NotPredicate;
 
 /**
  * Generic test case for SHA-related options targeted to X86 CPUs that don't
@@ -33,21 +35,14 @@
 public class GenericTestCaseForUnsupportedX86CPU
         extends SHAOptionsBase.TestCase {
     public GenericTestCaseForUnsupportedX86CPU(String optionName) {
-        super(optionName, new OrPredicate(Platform::isX64, Platform::isX86));
+        super(optionName, new AndPredicate(new OrPredicate(Platform::isX64, Platform::isX86),
+                new NotPredicate(SHAOptionsBase.getPredicateForOption(
+                        optionName))));
     }
 
     @Override
     protected void verifyWarnings() throws Throwable {
-        String shouldPassMessage = String.format("JVM should start with '-XX:+"
-                + "%s' flag, but output should contain warning.", optionName);
-        // Verify that when the tested option is explicitly enabled, then
-        // a warning will occur in VM output.
-        CommandLineOptionTest.verifySameJVMStartup(new String[] {
-                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
-                }, null, shouldPassMessage, shouldPassMessage, ExitCode.OK,
-                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
-
-        shouldPassMessage = String.format("JVM should start with '-XX:-%s' "
+        String shouldPassMessage = String.format("JVM should start with '-XX:-%s' "
                 + "flag without any warnings", optionName);
         // Verify that the tested option could be explicitly disabled without
         // a warning.
@@ -55,6 +50,19 @@
                         SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
                 }, shouldPassMessage, shouldPassMessage, ExitCode.OK,
                 CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+
+        // Verify that when the tested option is enabled, then
+        // a warning will occur in VM output if UseSHA is disabled.
+        if (!optionName.equals(SHAOptionsBase.USE_SHA_OPTION)) {
+            CommandLineOptionTest.verifySameJVMStartup(
+                    new String[] { SHAOptionsBase.getWarningForUnsupportedCPU(optionName) },
+                    null,
+                    shouldPassMessage,
+                    shouldPassMessage,
+                    ExitCode.OK,
+                    CommandLineOptionTest.prepareBooleanFlag(SHAOptionsBase.USE_SHA_OPTION, false),
+                    CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+        }
     }
 
     @Override
--- a/hotspot/test/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/test/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java	Tue Apr 26 21:54:21 2016 -0700
@@ -59,14 +59,23 @@
     };
 
     public static final BooleanSupplier SHA1_INSTRUCTION_AVAILABLE
-            = new OrPredicate(
-                    new CPUSpecificPredicate("sparc.*", new String[] { "sha1" },null),
-                    new CPUSpecificPredicate("aarch64.*", new String[] { "sha1" },null));
+            = new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" },null),
+              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "sha" },null),
+              new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] { "sha" },null),
+              new OrPredicate(
+                      new CPUSpecificPredicate("sparc.*", new String[] { "sha1" },null),
+                      new CPUSpecificPredicate("aarch64.*", new String[] { "sha1" },null)))));
 
     public static final BooleanSupplier SHA256_INSTRUCTION_AVAILABLE
-            = new OrPredicate(
-                    new CPUSpecificPredicate("sparc.*", new String[] { "sha256" },null),
-                    new CPUSpecificPredicate("aarch64.*", new String[] { "sha256" },null));
+            = new OrPredicate(new CPUSpecificPredicate("x86.*", new String[] { "sha" },null),
+              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "sha" },null),
+              new OrPredicate(new CPUSpecificPredicate("i386.*", new String[] {
+"sha" },null),
+              new OrPredicate(new CPUSpecificPredicate("x86_64", new String[] { "avx2", "bmi2" }, null),
+              new OrPredicate(new CPUSpecificPredicate("amd64.*", new String[] { "avx2", "bmi2" }, null),
+              new OrPredicate(
+                      new CPUSpecificPredicate("sparc.*", new String[] { "sha256" },null),
+                      new CPUSpecificPredicate("aarch64.*", new String[] { "sha256" },null)))))));
 
     public static final BooleanSupplier SHA512_INSTRUCTION_AVAILABLE
             = new OrPredicate(
--- a/hotspot/test/testlibrary/jdk/test/lib/cli/predicate/CPUSpecificPredicate.java	Tue Apr 26 20:43:59 2016 -0700
+++ b/hotspot/test/testlibrary/jdk/test/lib/cli/predicate/CPUSpecificPredicate.java	Tue Apr 26 21:54:21 2016 -0700
@@ -44,7 +44,7 @@
     @Override
     public boolean getAsBoolean() {
         if (!Platform.getOsArch().matches(cpuArchPattern)) {
-            System.out.println("CPU arch does not match " + cpuArchPattern);
+            System.out.println("CPU arch " + Platform.getOsArch() + " does not match " + cpuArchPattern);
             return false;
         }