187       for (int n = 0; n < 16; n++) {
188         __ vextractf64x4h(Address(rsp, base_addr+n*32), as_XMMRegister(n), 1);
189       }
190       // Save full ZMM registers(16..num_xmm_regs)
191       base_addr = XSAVE_AREA_UPPERBANK;
192 -     int off = 0;
192 +     off = 0;
193       int vector_len = Assembler::AVX_512bit;
194       for (int n = 16; n < num_xmm_regs; n++) {
195         __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
196       }
197     }
198   } else {
199     if (VM_Version::supports_evex()) {
200       // Save upper bank of ZMM registers(16..31) for double/float usage
201       int base_addr = XSAVE_AREA_UPPERBANK;
202 -     int off = 0;
202 +     off = 0;
203       for (int n = 16; n < num_xmm_regs; n++) {
204         __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
205       }
206     }
207   }
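The two save loops above use different slot sizes: for registers 0..15 only the upper 256 bits of each ZMM register are spilled, so the slots are 32 bytes apart (base_addr+n*32), while registers 16 and above are spilled in full 512-bit form into the upper-bank area, 64 bytes apart and re-indexed from zero (base_addr+(off++*64)). A minimal standalone sketch of that offset arithmetic follows; the base values are placeholders, not the real XSAVE-area layout constants.

#include <cassert>
#include <cstdio>

// Hypothetical stand-ins for the XSAVE-area base offsets used above;
// the real values come from the surrounding HotSpot code.
static const int kZmmHiBase     = 0x200;  // placeholder
static const int kUpperBankBase = 0x400;  // placeholder for XSAVE_AREA_UPPERBANK

// Registers 0..15: only the upper 256 bits are saved, so slots are 32 bytes.
int zmm_hi_slot(int n) {
  assert(n >= 0 && n < 16);
  return kZmmHiBase + n * 32;             // matches base_addr + n*32
}

// Registers 16..31: the full 512-bit register is saved, so slots are 64 bytes
// and the slot index restarts at zero for the upper bank.
int upper_bank_slot(int n) {
  assert(n >= 16 && n < 32);
  return kUpperBankBase + (n - 16) * 64;  // matches base_addr + (off++ * 64)
}

int main() {
  printf("xmm5  upper half -> rsp + %d\n", zmm_hi_slot(5));
  printf("xmm20 full ZMM   -> rsp + %d\n", upper_bank_slot(20));
  return 0;
}

Running the sketch just prints the two example offsets; it is only meant to make the 32-byte versus 64-byte stride explicit.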
323   if (restore_vectors) {
324     assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
325     assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
326   }
327 #else
328 -   assert(!save_vectors, "vectors are generated only by C2");
328 +   assert(!restore_vectors, "vectors are generated only by C2");
329 #endif
330
331   // On EVEX enabled targets everything is handled in pop fpu state
332   if (restore_vectors) {
333     // Restore upper half of YMM registers (0..15)