--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp Mon Oct 26 10:36:54 2015 +0100
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp Mon Oct 26 19:33:31 2015 -0700
@@ -2147,6 +2147,11 @@
// runtime code and native libraries.
void vzeroupper();
+ // AVX support for vectorized conditional move (double). The following two instructions are only ever used together.
+ void cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len);
+ void vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len);
+
+
protected:
// Next instructions require address alignment 16 bytes SSE mode.
// They should be called only from corresponding MacroAssembler instructions.