--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp Thu Jun 14 14:59:52 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp Fri Jun 15 01:25:19 2012 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -591,8 +591,9 @@
void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
VexSimdPrefix pre, bool vector256 = false) {
- vex_prefix(src, nds->encoding(), dst->encoding(),
- pre, VEX_OPCODE_0F, false, vector256);
+ int dst_enc = dst->encoding();
+ int nds_enc = nds->is_valid() ? nds->encoding() : 0;
+ vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256);
}
int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
@@ -600,9 +601,12 @@
bool vex_w, bool vector256);
int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
- VexSimdPrefix pre, bool vector256 = false) {
- return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
- pre, VEX_OPCODE_0F, false, vector256);
+ VexSimdPrefix pre, bool vector256 = false,
+ VexOpcode opc = VEX_OPCODE_0F) {
+ int src_enc = src->encoding();
+ int dst_enc = dst->encoding();
+ int nds_enc = nds->is_valid() ? nds->encoding() : 0;
+ return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256);
}
void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
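// Editorial sketch, not part of the patch: with the new is_valid() guard a
// two-operand AVX form can pass xnoreg for nds. xnoreg->is_valid() is false,
// so 0 is encoded into VEX.vvvv (all-ones after inversion, meaning "no second
// source operand"). The mnemonic and opcode byte below are illustrative only.
void Assembler::vex_example(XMMRegister dst, XMMRegister src) {
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3);
  emit_byte(0x51);          // hypothetical opcode byte
  emit_byte(0xC0 | encode); // ModRM byte, register-direct form
}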
@@ -1261,6 +1265,7 @@
void movdl(XMMRegister dst, Register src);
void movdl(Register dst, XMMRegister src);
void movdl(XMMRegister dst, Address src);
+ void movdl(Address dst, XMMRegister src);
// Move Double Quadword
void movdq(XMMRegister dst, Register src);
@@ -1274,6 +1279,14 @@
void movdqu(XMMRegister dst, Address src);
void movdqu(XMMRegister dst, XMMRegister src);
+ // Move Unaligned 256-bit Vector
+ void vmovdqu(Address dst, XMMRegister src);
+ void vmovdqu(XMMRegister dst, Address src);
+ void vmovdqu(XMMRegister dst, XMMRegister src);
+
+ // Move the lower 64 bits to the high 64 bits of a 128-bit register
+ void movlhps(XMMRegister dst, XMMRegister src);
+
void movl(Register dst, int32_t imm32);
void movl(Address dst, int32_t imm32);
void movl(Register dst, Register src);
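// Editorial sketch, not part of the patch: assumed use of the new unaligned
// 256-bit moves in stub code; the registers and addresses are placeholders.
__ vmovdqu(xmm0, Address(rsi, 0));  // load 32 bytes, no alignment required
__ vmovdqu(Address(rdi, 0), xmm0);  // store 32 bytes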
@@ -1615,6 +1628,17 @@
void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src);
+ // AVX Vector instructions.
+ void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
+ void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
+ void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
+
+ // AVX instruction used to clear the upper 128 bits of YMM registers, which
+ // avoids the transition penalty when switching between AVX and SSE states.
+ // There is no penalty if legacy SSE instructions are encoded using the VEX
+ // prefix because they always clear the upper 128 bits. It should be used
+ // before calling runtime code and native libraries.
+ void vzeroupper();
protected:
// Next instructions require address alignment 16 bytes SSE mode.
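// Editorial sketch, not part of the patch: assumed call-site usage. The
// UseAVX guard is an assumption; vzeroupper is only valid on AVX-capable CPUs.
if (UseAVX > 0) {
  __ vzeroupper();  // clear upper YMM bits before leaving compiled code
}
__ call(RuntimeAddress(entry));  // 'entry' is a placeholder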
@@ -2529,9 +2553,13 @@
void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+ // AVX Vector instructions
+
+ void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); }
void vxorpd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorpd(dst, nds, src); }
void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+ void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); }
void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
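// Editorial sketch, not part of the patch: the AddressLiteral overloads above
// are assumed to follow the usual MacroAssembler reachability pattern on the
// .cpp side (not shown in this hunk): use the literal directly when it is
// reachable with a 32-bit displacement, otherwise go through a scratch register.
void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vxorpd(dst, nds, as_Address(src));       // direct / RIP-relative address
  } else {
    lea(rscratch1, src);                     // materialize the far address
    vxorpd(dst, nds, Address(rscratch1, 0));
  }
}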