3028 emit_int8(0x16); |
3029 emit_int8(0x16); |
3029 emit_int8((unsigned char)(0xC0 | encode)); |
3030 emit_int8((unsigned char)(0xC0 | encode)); |
3030 emit_int8(imm8); |
3031 emit_int8(imm8); |
3031 } |
3032 } |
3032 |
3033 |
|
3034 void Assembler::pextrw(Register dst, XMMRegister src, int imm8) { |
|
3035 assert(VM_Version::supports_sse2(), ""); |
|
3036 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, /* no_mask_reg */ true, |
|
3037 VEX_OPCODE_0F_3A, /* rex_w */ false, AVX_128bit, /* legacy_mode */ _legacy_mode_bw); |
|
3038 emit_int8(0x15); |
|
3039 emit_int8((unsigned char)(0xC0 | encode)); |
|
3040 emit_int8(imm8); |
|
3041 } |
|
3042 |
3033 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { |
3043 void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { |
3034 assert(VM_Version::supports_sse4_1(), ""); |
3044 assert(VM_Version::supports_sse4_1(), ""); |
3035 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, /* no_mask_reg */ true, |
3045 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, /* no_mask_reg */ true, |
3036 VEX_OPCODE_0F_3A, /* rex_w */ false, AVX_128bit, /* legacy_mode */ _legacy_mode_dq); |
3046 VEX_OPCODE_0F_3A, /* rex_w */ false, AVX_128bit, /* legacy_mode */ _legacy_mode_dq); |
3037 emit_int8(0x22); |
3047 emit_int8(0x22); |
3042 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { |
3052 void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { |
3043 assert(VM_Version::supports_sse4_1(), ""); |
3053 assert(VM_Version::supports_sse4_1(), ""); |
3044 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, /* no_mask_reg */ true, |
3054 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, /* no_mask_reg */ true, |
3045 VEX_OPCODE_0F_3A, /* rex_w */ true, AVX_128bit, /* legacy_mode */ _legacy_mode_dq); |
3055 VEX_OPCODE_0F_3A, /* rex_w */ true, AVX_128bit, /* legacy_mode */ _legacy_mode_dq); |
3046 emit_int8(0x22); |
3056 emit_int8(0x22); |
|
3057 emit_int8((unsigned char)(0xC0 | encode)); |
|
3058 emit_int8(imm8); |
|
3059 } |
|
3060 |
|
3061 void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) { |
|
3062 assert(VM_Version::supports_sse2(), ""); |
|
3063 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, /* no_mask_reg */ true, |
|
3064 VEX_OPCODE_0F, /* rex_w */ false, AVX_128bit, /* legacy_mode */ _legacy_mode_bw); |
|
3065 emit_int8((unsigned char)0xC4); |
3047 emit_int8((unsigned char)(0xC0 | encode)); |
3066 emit_int8((unsigned char)(0xC0 | encode)); |
3048 emit_int8(imm8); |
3067 emit_int8(imm8); |
3049 } |
3068 } |
3050 |
3069 |
3051 void Assembler::pmovzxbw(XMMRegister dst, Address src) { |
3070 void Assembler::pmovzxbw(XMMRegister dst, Address src) { |
4061 } else { |
4080 } else { |
4062 emit_simd_arith(0x59, dst, src, VEX_SIMD_66); |
4081 emit_simd_arith(0x59, dst, src, VEX_SIMD_66); |
4063 } |
4082 } |
4064 } |
4083 } |
4065 |
4084 |
|
4085 void Assembler::mulpd(XMMRegister dst, Address src) { |
|
4086 _instruction_uses_vl = true; |
|
4087 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
|
4088 if (VM_Version::supports_evex()) { |
|
4089 emit_simd_arith_q(0x59, dst, src, VEX_SIMD_66); |
|
4090 } else { |
|
4091 emit_simd_arith(0x59, dst, src, VEX_SIMD_66); |
|
4092 } |
|
4093 } |
|
4094 |
4066 void Assembler::mulps(XMMRegister dst, XMMRegister src) { |
4095 void Assembler::mulps(XMMRegister dst, XMMRegister src) { |
4067 _instruction_uses_vl = true; |
4096 _instruction_uses_vl = true; |
4068 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
4097 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
4069 emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE); |
4098 emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE); |
4070 } |
4099 } |
4247 if (VM_Version::supports_evex()) { |
4276 if (VM_Version::supports_evex()) { |
4248 _tuple_type = EVEX_FV; |
4277 _tuple_type = EVEX_FV; |
4249 _input_size_in_bits = EVEX_32bit; |
4278 _input_size_in_bits = EVEX_32bit; |
4250 } |
4279 } |
4251 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len, /* no_mask_reg */ false, /* legacy_mode */ _legacy_mode_dq); |
4280 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector_len, /* no_mask_reg */ false, /* legacy_mode */ _legacy_mode_dq); |
|
4281 } |
|
4282 |
|
4283 void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) { |
|
4284 _instruction_uses_vl = true; |
|
4285 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
|
4286 if (VM_Version::supports_evex()) { |
|
4287 emit_simd_arith_q(0x15, dst, src, VEX_SIMD_66); |
|
4288 } else { |
|
4289 emit_simd_arith(0x15, dst, src, VEX_SIMD_66); |
|
4290 } |
|
4291 } |
|
4292 |
|
4293 void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) { |
|
4294 _instruction_uses_vl = true; |
|
4295 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
|
4296 if (VM_Version::supports_evex()) { |
|
4297 emit_simd_arith_q(0x14, dst, src, VEX_SIMD_66); |
|
4298 } else { |
|
4299 emit_simd_arith(0x14, dst, src, VEX_SIMD_66); |
|
4300 } |
4252 } |
4301 } |
4253 |
4302 |
4254 void Assembler::xorpd(XMMRegister dst, XMMRegister src) { |
4303 void Assembler::xorpd(XMMRegister dst, XMMRegister src) { |
4255 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
4304 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
4256 if (VM_Version::supports_avx512dq()) { |
4305 if (VM_Version::supports_avx512dq()) { |
4869 assert(UseAVX > 0, "requires some form of AVX"); |
4918 assert(UseAVX > 0, "requires some form of AVX"); |
4870 emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector_len); |
4919 emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector_len); |
4871 } |
4920 } |
4872 |
4921 |
4873 |
4922 |
// logical operations packed integers
4875 void Assembler::pand(XMMRegister dst, XMMRegister src) { |
4924 void Assembler::pand(XMMRegister dst, XMMRegister src) { |
|
4925 _instruction_uses_vl = true; |
4876 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
4926 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
4877 emit_simd_arith(0xDB, dst, src, VEX_SIMD_66); |
4927 emit_simd_arith(0xDB, dst, src, VEX_SIMD_66); |
4878 } |
4928 } |
4879 |
4929 |
4880 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { |
4930 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { |
4889 if (VM_Version::supports_evex()) { |
4939 if (VM_Version::supports_evex()) { |
4890 _tuple_type = EVEX_FV; |
4940 _tuple_type = EVEX_FV; |
4891 _input_size_in_bits = EVEX_32bit; |
4941 _input_size_in_bits = EVEX_32bit; |
4892 } |
4942 } |
4893 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len); |
4943 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector_len); |
|
4944 } |
|
4945 |
|
4946 void Assembler::pandn(XMMRegister dst, XMMRegister src) { |
|
4947 _instruction_uses_vl = true; |
|
4948 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
|
4949 if (VM_Version::supports_evex()) { |
|
4950 emit_simd_arith_q(0xDF, dst, src, VEX_SIMD_66); |
|
4951 } |
|
4952 else { |
|
4953 emit_simd_arith(0xDF, dst, src, VEX_SIMD_66); |
|
4954 } |
4894 } |
4955 } |
4895 |
4956 |
4896 void Assembler::por(XMMRegister dst, XMMRegister src) { |
4957 void Assembler::por(XMMRegister dst, XMMRegister src) { |
4897 _instruction_uses_vl = true; |
4958 _instruction_uses_vl = true; |
4898 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |
4959 NOT_LP64(assert(VM_Version::supports_sse2(), "")); |