--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp Fri Sep 11 16:11:07 2015 +0300
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp Fri Sep 11 17:02:44 2015 -0700
@@ -795,6 +795,13 @@
void xmm_copy_forward(Register from, Register to_from, Register qword_count) {
assert( UseSSE >= 2, "supported cpu only" );
Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
+ // Seed k1 with an all-ones merge mask for the EVEX moves below (rbx borrowed as scratch).
+ if (UseAVX > 2) {
+ __ push(rbx);
+ __ movl(rbx, 0xffff);
+ __ kmovdl(k1, rbx);
+ __ pop(rbx);
+ }
// Copy 64-byte chunks
__ jmpb(L_copy_64_bytes);
__ align(OptoLoopAlignment);
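
The k1 setup above matters because, once UseAVX > 2, the EVEX-encoded moves in this helper use k1 as their implicit merge mask, so a stale partial value in k1 would silently skip lanes; rbx is saved and restored around its use as a scratch register. A minimal standalone sketch of the merge-mask behavior being guarded against (not part of the patch; assumes AVX-512F hardware and GCC/Clang with -mavx512f):

  #include <immintrin.h>
  #include <cstdio>

  int main() {
    alignas(64) int src[16], dst[16];
    for (int i = 0; i < 16; i++) { src[i] = i; dst[i] = -1; }

    __mmask16 full = 0xffff;                  // all 16 dword lanes enabled
    __m512i v = _mm512_maskz_loadu_epi32(full, src);
    _mm512_mask_storeu_epi32(dst, full, v);   // full mask: ordinary 64-byte store

    __mmask16 stale = 0x00ff;                 // a partial (stale) mask
    _mm512_mask_storeu_epi32(dst, stale, _mm512_set1_epi32(0));
    // lanes 8..15 are not written: prints 0 0 0 0 0 0 0 0 8 9 10 11 12 13 14 15
    for (int i = 0; i < 16; i++) printf("%d ", dst[i]);
    printf("\n");
    return 0;
  }
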
@@ -802,8 +808,8 @@
if (UseUnalignedLoadStores) {
if (UseAVX > 2) {
- __ evmovdqu(xmm0, Address(from, 0), Assembler::AVX_512bit);
- __ evmovdqu(Address(from, to_from, Address::times_1, 0), xmm0, Assembler::AVX_512bit);
+ __ evmovdqul(xmm0, Address(from, 0), Assembler::AVX_512bit);
+ __ evmovdqul(Address(from, to_from, Address::times_1, 0), xmm0, Assembler::AVX_512bit);
} else if (UseAVX == 2) {
__ vmovdqu(xmm0, Address(from, 0));
__ vmovdqu(Address(from, to_from, Address::times_1, 0), xmm0);
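
The evmovdqu -> evmovdqul rename makes the dword element size explicit (the underlying instruction is vmovdqu32). For a full-width unmasked move the element size is immaterial, but under an opmask it fixes the lane granularity, which this hypothetical sketch illustrates (again AVX-512F intrinsics, not patch code):

  #include <immintrin.h>
  #include <cstdio>

  int main() {
    alignas(64) long long buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    // The same number of set mask bits covers different bytes at
    // different element sizes: four bits select 4 dwords (16 bytes)
    // for epi32, but 4 qwords (32 bytes) for epi64.
    __m512i d = _mm512_maskz_loadu_epi32(0x000f, buf);
    __m512i q = _mm512_maskz_loadu_epi64(0x0f, buf);
    printf("%lld %lld\n",
           _mm512_reduce_add_epi64(d),    // 0 + 1 = 1  (first 16 bytes only)
           _mm512_reduce_add_epi64(q));   // 0 + 1 + 2 + 3 = 6  (first 32 bytes)
    return 0;
  }
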
@@ -2217,6 +2223,15 @@
const XMMRegister xmm_temp4 = xmm5;
__ enter(); // required for proper stackwalking of RuntimeStub frame
+
+ // For EVEX with VL and BW, provide a standard full mask; VL = 128 guides the merge
+ // context for the registers used, since all instructions below run in 128-bit mode.
+ // On EVEX hardware without VL and BW, these instructions all fall back to AVX.
+ if (VM_Version::supports_avx512vlbw()) {
+ __ movl(rdx, 0xffff);
+ __ kmovdl(k1, rdx);
+ }
+
__ movptr(from, from_param);
__ movptr(key, key_param);
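
VM_Version::supports_avx512vlbw() requires both AVX512VL (128/256-bit EVEX forms) and AVX512BW (byte/word operations), which is what lets this 128-bit code rely on the k1 mask. A hedged sketch of an equivalent feature test (hypothetical helper, not HotSpot's implementation), using GCC/Clang's <cpuid.h>:

  #include <cpuid.h>

  // CPUID leaf 7, subleaf 0: EBX bit 30 = AVX512BW, EBX bit 31 = AVX512VL.
  static bool supports_avx512vlbw_sketch() {
    unsigned eax, ebx, ecx, edx;
    if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
      return false;
    const unsigned bw = 1u << 30;
    const unsigned vl = 1u << 31;
    return (ebx & bw) && (ebx & vl);
  }
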
@@ -2315,6 +2330,15 @@
const XMMRegister xmm_temp4 = xmm5;
__ enter(); // required for proper stackwalking of RuntimeStub frame
+
+ // For EVEX with VL and BW, provide a standard full mask; VL = 128 guides the merge
+ // context for the registers used, since all instructions below run in 128-bit mode.
+ // On EVEX hardware without VL and BW, these instructions all fall back to AVX.
+ if (VM_Version::supports_avx512vlbw()) {
+ __ movl(rdx, 0xffff);
+ __ kmovdl(k1, rdx);
+ }
+
__ movptr(from, from_param);
__ movptr(key, key_param);
@@ -2441,6 +2465,14 @@
__ enter(); // required for proper stackwalking of RuntimeStub frame
handleSOERegisters(true /*saving*/);
+ // For EVEX with VL and BW, provide a standard full mask; VL = 128 guides the merge
+ // context for the registers used, since all instructions below run in 128-bit mode.
+ // On EVEX hardware without VL and BW, these instructions all fall back to AVX.
+ if (VM_Version::supports_avx512vlbw()) {
+ __ movl(rdx, 0xffff);
+ __ kmovdl(k1, rdx);
+ }
+
// load registers from incoming parameters
const Address from_param(rbp, 8+0);
const Address to_param (rbp, 8+4);
@@ -2602,6 +2634,14 @@
__ enter(); // required for proper stackwalking of RuntimeStub frame
handleSOERegisters(true /*saving*/);
+ // For EVEX with VL and BW, provide a standard full mask; VL = 128 guides the merge
+ // context for the registers used, since all instructions below run in 128-bit mode.
+ // On EVEX hardware without VL and BW, these instructions all fall back to AVX.
+ if (VM_Version::supports_avx512vlbw()) {
+ __ movl(rdx, 0xffff);
+ __ kmovdl(k1, rdx);
+ }
+
// load registers from incoming parameters
const Address from_param(rbp, 8+0);
const Address to_param (rbp, 8+4);
@@ -2782,6 +2822,14 @@
__ enter();
handleSOERegisters(true); // Save registers
+ // For EVEX with VL and BW, provide a standard full mask; VL = 128 guides the merge
+ // context for the registers used, since all instructions below run in 128-bit mode.
+ // On EVEX hardware without VL and BW, these instructions all fall back to AVX.
+ if (VM_Version::supports_avx512vlbw()) {
+ __ movl(rdx, 0xffff);
+ __ kmovdl(k1, rdx);
+ }
+
__ movptr(state, state_param);
__ movptr(subkeyH, subkeyH_param);
__ movptr(data, data_param);
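
The same mask-seeding prologue now appears in five stubs (plus the copy helper above). Were it ever factored out (hypothetical refactoring, not part of this patch), a small helper could look like:

  // Hypothetical helper, not in this patch: load an all-ones 16-bit
  // mask into k1 via a caller-chosen scratch register.
  static void set_full_k1_mask(MacroAssembler* masm, Register scratch) {
    masm->movl(scratch, 0xffff);   // enable all 16 lanes
    masm->kmovdl(k1, scratch);     // k1 = full merge mask
  }

Each call site would then reduce to a single set_full_k1_mask(_masm, rdx) right after enter().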