8168 __ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
8172 __ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
8169 %} |
8173 %} |
8170 ins_pipe( pipe_slow ); |
8174 ins_pipe( pipe_slow ); |
8171 %} |
8175 %} |
8172 |
8176 |
|
// Conditional move of 8 packed floats in a 256-bit (vecY) register.
// AVX1/AVX2 only: the `UseAVX < 3` term in the predicate keeps this rule
// out of AVX-512 compiles, where CMoveVF is presumably matched by a
// mask-register pattern instead — TODO confirm against the AVX-512 rules.
// Lowering: cmpps builds a per-lane all-ones/all-zeros mask in dst from
// src1 ? src2 under the condition code, then vblendvps selects between
// src1 and src2 lane-by-lane using that mask (dst doubles as the mask
// operand, hence TEMP dst).
instruct vcmov8F_reg(vecY dst, vecY src1, vecY src2, immI8 cop, cmpOp_vcmppd copnd) %{
  predicate(UseAVX > 0 && UseAVX < 3 && n->as_Vector()->length() == 8);
  match(Set dst (CMoveVF (Binary copnd cop) (Binary src1 src2)));
  effect(TEMP dst, USE src1, USE src2);
  format %{ "cmpps.$copnd $dst, $src1, $src2 ! vcmovevf, cond=$cop\n\t"
            "blendvps $dst,$src1,$src2,$dst ! vcmovevf\n\t"
  %}
  ins_encode %{
    int vector_len = 1; // 1 selects the 256-bit (YMM) encoding
    // $$cmpcode is the integer comparison predicate and both cmpps() and
    // blendvps() take it as an int; the previous cast to
    // Assembler::Condition was immediately narrowed back to int, so cast
    // directly to int instead of round-tripping through the enum.
    int cond = (int)($copnd$$cmpcode);
    __ cmpps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, cond, vector_len);
    __ blendvps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, $dst$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
8192 |
8173 instruct vcmov4D_reg(vecY dst, vecY src1, vecY src2, immI8 cop, cmpOp_vcmppd copnd) %{ |
8193 instruct vcmov4D_reg(vecY dst, vecY src1, vecY src2, immI8 cop, cmpOp_vcmppd copnd) %{ |
8174 predicate(UseAVX > 0 && UseAVX < 3 && n->as_Vector()->length() == 4); |
8194 predicate(UseAVX > 0 && UseAVX < 3 && n->as_Vector()->length() == 4); |
8175 match(Set dst (CMoveVD (Binary copnd cop) (Binary src1 src2))); |
8195 match(Set dst (CMoveVD (Binary copnd cop) (Binary src1 src2))); |
8176 effect(TEMP dst, USE src1, USE src2); |
8196 effect(TEMP dst, USE src1, USE src2); |
8177 format %{ "cmppd.$copnd $dst, $src1, $src2 ! vcmovevd, cond=$cop\n\t" |
8197 format %{ "cmppd.$copnd $dst, $src1, $src2 ! vcmovevd, cond=$cop\n\t" |