8078113: 8011102 changes may cause incorrect results
author kvn
date Fri, 17 Apr 2015 17:39:19 -0700
changeset 30299 1f6f7d1e0c1e
parent 30298 00461a39b519
child 30300 4b12a5b40064
8078113: 8011102 changes may cause incorrect results
Summary: replace Vzeroupper instruction in stubs with zeroing only used ymm registers.
Reviewed-by: kvn
Contributed-by: sandhya.viswanathan@intel.com
hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp
hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
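
Context for the hunks below: vzeroupper clears bits 255:128 of every YMM register, so a stub that executes it can also clobber the upper halves of registers it never touched, whereas a 128-bit VEX-encoded vpxor of a register with itself zeroes only that one register, upper bits included. The following is a minimal illustrative sketch of the pattern the patch applies, using the same MacroAssembler calls as the hunks; xmm0/xmm1 here stand in for whichever registers a given stub actually uses:

    // Old cleanup: a single instruction, but it zeroes the upper 128 bits
    // of every YMM register, including ones this stub never used.
    __ vzeroupper();

    // New cleanup: zero only the YMM registers the stub actually dirtied.
    // The 128-bit VEX-encoded vpxor writes zero across the full 256-bit
    // register, so the upper bits are cleaned without disturbing others.
    __ vpxor(xmm0, xmm0);
    __ vpxor(xmm1, xmm1);

In the fill and char-array hunks the code that follows still needs a broadcast value in the low lanes, so those sites re-materialize it with movdl/pshufd rather than (or in addition to) plain vpxor.
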
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Apr 15 14:51:56 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Apr 17 17:39:19 2015 -0700
@@ -6657,7 +6657,7 @@
     subl(cnt2, stride2);
     jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
     // clean upper bits of YMM registers
-    vzeroupper();
+    vpxor(vec1, vec1);
 
     // compare wide vectors tail
     bind(COMPARE_WIDE_TAIL);
@@ -6672,7 +6672,7 @@
     // Identifies the mismatching (higher or lower)16-bytes in the 32-byte vectors.
     bind(VECTOR_NOT_EQUAL);
     // clean upper bits of YMM registers
-    vzeroupper();
+    vpxor(vec1, vec1);
     lea(str1, Address(str1, result, scale));
     lea(str2, Address(str2, result, scale));
     jmp(COMPARE_16_CHARS);
@@ -6931,7 +6931,8 @@
   bind(DONE);
   if (UseAVX >= 2) {
     // clean upper bits of YMM registers
-    vzeroupper();
+    vpxor(vec1, vec1);
+    vpxor(vec2, vec2);
   }
 }
 
@@ -7065,7 +7066,8 @@
 
         BIND(L_check_fill_8_bytes);
         // clean upper bits of YMM registers
-        vzeroupper();
+        movdl(xtmp, value);
+        pshufd(xtmp, xtmp, 0);
       } else {
         // Fill 32-byte chunks
         pshufd(xtmp, xtmp, 0);
@@ -7228,7 +7230,11 @@
     bind(L_copy_16_chars_exit);
     if (UseAVX >= 2) {
       // clean upper bits of YMM registers
-      vzeroupper();
+      vpxor(tmp2Reg, tmp2Reg);
+      vpxor(tmp3Reg, tmp3Reg);
+      vpxor(tmp4Reg, tmp4Reg);
+      movdl(tmp1Reg, tmp5);
+      pshufd(tmp1Reg, tmp1Reg, 0);
     }
     subptr(len, 8);
     jccb(Assembler::greater, L_copy_8_chars_exit);
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Apr 15 14:51:56 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Apr 17 17:39:19 2015 -0700
@@ -835,7 +835,8 @@
 
     if (UseUnalignedLoadStores && (UseAVX >= 2)) {
       // clean upper bits of YMM registers
-      __ vzeroupper();
+      __ vpxor(xmm0, xmm0);
+      __ vpxor(xmm1, xmm1);
     }
     __ addl(qword_count, 8);
     __ jccb(Assembler::zero, L_exit);
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Apr 15 14:51:56 2015 +0200
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Apr 17 17:39:19 2015 -0700
@@ -1352,7 +1352,8 @@
       __ BIND(L_end);
       if (UseAVX >= 2) {
         // clean upper bits of YMM registers
-        __ vzeroupper();
+        __ vpxor(xmm0, xmm0);
+        __ vpxor(xmm1, xmm1);
       }
     } else {
       // Copy 32-bytes per iteration
@@ -1429,7 +1430,8 @@
       __ BIND(L_end);
       if (UseAVX >= 2) {
         // clean upper bits of YMM registers
-        __ vzeroupper();
+        __ vpxor(xmm0, xmm0);
+        __ vpxor(xmm1, xmm1);
       }
     } else {
       // Copy 32-bytes per iteration