2112 // penalty if legacy SSE instructions are encoded using VEX prefix because |
2112 // penalty if legacy SSE instructions are encoded using VEX prefix because |
2113 // they always clear upper 128 bits. It should be used before calling |
2113 // they always clear upper 128 bits. It should be used before calling |
2114 // runtime code and native libraries. |
2114 // runtime code and native libraries. |
2115 void vzeroupper(); |
2115 void vzeroupper(); |
2116 |
2116 |
2117 // AVX support for vectorized conditional move (float/double). The following two instructions are used only coupled. |
2117 // AVX support for vectorized conditional move (float/double). The following two instructions are used only coupled. |
2118 void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len); |
2118 void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len); |
2119 void blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len); |
2119 void blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len); |
|
2120 void cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len); |
|
2121 void blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len); |
2120 void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len); |
2122 void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len); |
2121 |
2123 |
2122 protected: |
2124 protected: |
2123 // The next instructions require 16-byte address alignment in SSE mode. |
2125 // The next instructions require 16-byte address alignment in SSE mode. |
2124 // They should be called only from corresponding MacroAssembler instructions. |
2126 // They should be called only from corresponding MacroAssembler instructions. |