author | mdoerr |
Wed, 16 Oct 2019 11:52:56 +0200 | |
changeset 58643 | b381e5328461 |
parent 58516 | d376d86b0a01 |
child 58679 | 9c3209ff7550 |
permissions | -rw-r--r-- |
11429 | 1 |
// |
54022
ff399127078a
8217561: X86: Add floating-point Math.min/max intrinsics
bsrbnd
parents:
53639
diff
changeset
|
2 |
// Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. |
11429 | 3 |
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 |
// |
|
5 |
// This code is free software; you can redistribute it and/or modify it |
|
6 |
// under the terms of the GNU General Public License version 2 only, as |
|
7 |
// published by the Free Software Foundation. |
|
8 |
// |
|
9 |
// This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 |
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 |
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 |
// version 2 for more details (a copy is included in the LICENSE file that |
|
13 |
// accompanied this code). |
|
14 |
// |
|
15 |
// You should have received a copy of the GNU General Public License version |
|
16 |
// 2 along with this work; if not, write to the Free Software Foundation, |
|
17 |
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 |
// |
|
19 |
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 |
// or visit www.oracle.com if you need additional information or have any |
|
21 |
// questions. |
|
22 |
// |
|
23 |
// |
|
24 |
||
25 |
// X86 Common Architecture Description File |
|
26 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
27 |
//----------REGISTER DEFINITION BLOCK------------------------------------------ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
28 |
// This information is used by the matcher and the register allocator to |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
29 |
// describe individual registers and classes of registers within the target |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
30 |
// archtecture. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
31 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
32 |
register %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
33 |
//----------Architecture Description Register Definitions---------------------- |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
34 |
// General Registers |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
35 |
// "reg_def" name ( register save type, C convention save type, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
36 |
// ideal register type, encoding ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
37 |
// Register Save Types: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
38 |
// |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
39 |
// NS = No-Save: The register allocator assumes that these registers |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
40 |
// can be used without saving upon entry to the method, & |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
41 |
// that they do not need to be saved at call sites. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
42 |
// |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
43 |
// SOC = Save-On-Call: The register allocator assumes that these registers |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
44 |
// can be used without saving upon entry to the method, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
45 |
// but that they must be saved at call sites. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
46 |
// |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
47 |
// SOE = Save-On-Entry: The register allocator assumes that these registers |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
48 |
// must be saved before using them upon entry to the |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
49 |
// method, but they do not need to be saved at call |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
50 |
// sites. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
51 |
// |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
52 |
// AS = Always-Save: The register allocator assumes that these registers |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
53 |
// must be saved before using them upon entry to the |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
54 |
// method, & that they must be saved at call sites. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
55 |
// |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
56 |
// Ideal Register Type is used to determine how to save & restore a |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
57 |
// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
58 |
// spilled with LoadP/StoreP. If the register supports both, use Op_RegI. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
59 |
// |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
60 |
// The encoding number is the actual bit-pattern placed into the opcodes. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
61 |
|
30624 | 62 |
// XMM registers. 512-bit registers or 8 words each, labeled (a)-p. |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
63 |
// Word a in each register holds a Float, words ab hold a Double. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
64 |
// The whole registers are used in SSE4.2 version intrinsics, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
65 |
// array copy stubs and superword operations (see UseSSE42Intrinsics, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
66 |
// UseXMMForArrayCopy and UseSuperword flags). |
30624 | 67 |
// For pre EVEX enabled architectures: |
68 |
// XMM8-XMM15 must be encoded with REX (VEX for UseAVX) |
|
69 |
// For EVEX enabled architectures: |
|
70 |
// XMM8-XMM31 must be encoded with REX (EVEX for UseAVX). |
|
71 |
// |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
72 |
// Linux ABI: No register preserved across function calls |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
73 |
// XMM0-XMM7 might hold parameters |
30624 | 74 |
// Windows ABI: XMM6-XMM31 preserved across function calls |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
75 |
// XMM0-XMM3 might hold parameters |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
76 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
77 |
reg_def XMM0 ( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()); |
13294 | 78 |
reg_def XMM0b( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(1)); |
79 |
reg_def XMM0c( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(2)); |
|
80 |
reg_def XMM0d( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(3)); |
|
81 |
reg_def XMM0e( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(4)); |
|
82 |
reg_def XMM0f( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(5)); |
|
83 |
reg_def XMM0g( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(6)); |
|
84 |
reg_def XMM0h( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(7)); |
|
30624 | 85 |
reg_def XMM0i( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(8)); |
86 |
reg_def XMM0j( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(9)); |
|
87 |
reg_def XMM0k( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(10)); |
|
88 |
reg_def XMM0l( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(11)); |
|
89 |
reg_def XMM0m( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(12)); |
|
90 |
reg_def XMM0n( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(13)); |
|
91 |
reg_def XMM0o( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(14)); |
|
92 |
reg_def XMM0p( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
93 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
94 |
reg_def XMM1 ( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()); |
13294 | 95 |
reg_def XMM1b( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(1)); |
96 |
reg_def XMM1c( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(2)); |
|
97 |
reg_def XMM1d( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(3)); |
|
98 |
reg_def XMM1e( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(4)); |
|
99 |
reg_def XMM1f( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(5)); |
|
100 |
reg_def XMM1g( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(6)); |
|
101 |
reg_def XMM1h( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(7)); |
|
30624 | 102 |
reg_def XMM1i( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(8)); |
103 |
reg_def XMM1j( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(9)); |
|
104 |
reg_def XMM1k( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(10)); |
|
105 |
reg_def XMM1l( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(11)); |
|
106 |
reg_def XMM1m( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(12)); |
|
107 |
reg_def XMM1n( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(13)); |
|
108 |
reg_def XMM1o( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(14)); |
|
109 |
reg_def XMM1p( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
110 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
111 |
reg_def XMM2 ( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()); |
13294 | 112 |
reg_def XMM2b( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(1)); |
113 |
reg_def XMM2c( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(2)); |
|
114 |
reg_def XMM2d( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(3)); |
|
115 |
reg_def XMM2e( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(4)); |
|
116 |
reg_def XMM2f( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(5)); |
|
117 |
reg_def XMM2g( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(6)); |
|
118 |
reg_def XMM2h( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(7)); |
|
30624 | 119 |
reg_def XMM2i( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(8)); |
120 |
reg_def XMM2j( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(9)); |
|
121 |
reg_def XMM2k( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(10)); |
|
122 |
reg_def XMM2l( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(11)); |
|
123 |
reg_def XMM2m( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(12)); |
|
124 |
reg_def XMM2n( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(13)); |
|
125 |
reg_def XMM2o( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(14)); |
|
126 |
reg_def XMM2p( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
127 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
128 |
reg_def XMM3 ( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()); |
13294 | 129 |
reg_def XMM3b( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(1)); |
130 |
reg_def XMM3c( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(2)); |
|
131 |
reg_def XMM3d( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(3)); |
|
132 |
reg_def XMM3e( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(4)); |
|
133 |
reg_def XMM3f( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(5)); |
|
134 |
reg_def XMM3g( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(6)); |
|
135 |
reg_def XMM3h( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(7)); |
|
30624 | 136 |
reg_def XMM3i( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(8)); |
137 |
reg_def XMM3j( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(9)); |
|
138 |
reg_def XMM3k( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(10)); |
|
139 |
reg_def XMM3l( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(11)); |
|
140 |
reg_def XMM3m( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(12)); |
|
141 |
reg_def XMM3n( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(13)); |
|
142 |
reg_def XMM3o( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(14)); |
|
143 |
reg_def XMM3p( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
144 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
145 |
reg_def XMM4 ( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()); |
13294 | 146 |
reg_def XMM4b( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(1)); |
147 |
reg_def XMM4c( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(2)); |
|
148 |
reg_def XMM4d( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(3)); |
|
149 |
reg_def XMM4e( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(4)); |
|
150 |
reg_def XMM4f( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(5)); |
|
151 |
reg_def XMM4g( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(6)); |
|
152 |
reg_def XMM4h( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(7)); |
|
30624 | 153 |
reg_def XMM4i( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(8)); |
154 |
reg_def XMM4j( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(9)); |
|
155 |
reg_def XMM4k( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(10)); |
|
156 |
reg_def XMM4l( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(11)); |
|
157 |
reg_def XMM4m( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(12)); |
|
158 |
reg_def XMM4n( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(13)); |
|
159 |
reg_def XMM4o( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(14)); |
|
160 |
reg_def XMM4p( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
161 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
162 |
reg_def XMM5 ( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()); |
13294 | 163 |
reg_def XMM5b( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(1)); |
164 |
reg_def XMM5c( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(2)); |
|
165 |
reg_def XMM5d( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(3)); |
|
166 |
reg_def XMM5e( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(4)); |
|
167 |
reg_def XMM5f( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(5)); |
|
168 |
reg_def XMM5g( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(6)); |
|
169 |
reg_def XMM5h( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(7)); |
|
30624 | 170 |
reg_def XMM5i( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(8)); |
171 |
reg_def XMM5j( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(9)); |
|
172 |
reg_def XMM5k( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(10)); |
|
173 |
reg_def XMM5l( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(11)); |
|
174 |
reg_def XMM5m( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(12)); |
|
175 |
reg_def XMM5n( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(13)); |
|
176 |
reg_def XMM5o( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(14)); |
|
177 |
reg_def XMM5p( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
178 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
179 |
reg_def XMM6 ( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()); |
13294 | 180 |
reg_def XMM6b( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(1)); |
181 |
reg_def XMM6c( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(2)); |
|
182 |
reg_def XMM6d( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(3)); |
|
183 |
reg_def XMM6e( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(4)); |
|
184 |
reg_def XMM6f( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(5)); |
|
185 |
reg_def XMM6g( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(6)); |
|
186 |
reg_def XMM6h( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(7)); |
|
30624 | 187 |
reg_def XMM6i( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(8)); |
188 |
reg_def XMM6j( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(9)); |
|
189 |
reg_def XMM6k( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(10)); |
|
190 |
reg_def XMM6l( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(11)); |
|
191 |
reg_def XMM6m( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(12)); |
|
192 |
reg_def XMM6n( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(13)); |
|
193 |
reg_def XMM6o( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(14)); |
|
194 |
reg_def XMM6p( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
195 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
196 |
reg_def XMM7 ( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()); |
13294 | 197 |
reg_def XMM7b( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(1)); |
198 |
reg_def XMM7c( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(2)); |
|
199 |
reg_def XMM7d( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(3)); |
|
200 |
reg_def XMM7e( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(4)); |
|
201 |
reg_def XMM7f( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(5)); |
|
202 |
reg_def XMM7g( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(6)); |
|
203 |
reg_def XMM7h( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(7)); |
|
30624 | 204 |
reg_def XMM7i( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(8)); |
205 |
reg_def XMM7j( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(9)); |
|
206 |
reg_def XMM7k( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(10)); |
|
207 |
reg_def XMM7l( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(11)); |
|
208 |
reg_def XMM7m( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(12)); |
|
209 |
reg_def XMM7n( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(13)); |
|
210 |
reg_def XMM7o( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(14)); |
|
211 |
reg_def XMM7p( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
212 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
213 |
#ifdef _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
214 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
215 |
reg_def XMM8 ( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()); |
13294 | 216 |
reg_def XMM8b( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(1)); |
217 |
reg_def XMM8c( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(2)); |
|
218 |
reg_def XMM8d( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(3)); |
|
219 |
reg_def XMM8e( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(4)); |
|
220 |
reg_def XMM8f( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(5)); |
|
221 |
reg_def XMM8g( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(6)); |
|
222 |
reg_def XMM8h( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(7)); |
|
30624 | 223 |
reg_def XMM8i( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(8)); |
224 |
reg_def XMM8j( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(9)); |
|
225 |
reg_def XMM8k( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(10)); |
|
226 |
reg_def XMM8l( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(11)); |
|
227 |
reg_def XMM8m( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(12)); |
|
228 |
reg_def XMM8n( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(13)); |
|
229 |
reg_def XMM8o( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(14)); |
|
230 |
reg_def XMM8p( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
231 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
232 |
reg_def XMM9 ( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()); |
13294 | 233 |
reg_def XMM9b( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(1)); |
234 |
reg_def XMM9c( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(2)); |
|
235 |
reg_def XMM9d( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(3)); |
|
236 |
reg_def XMM9e( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(4)); |
|
237 |
reg_def XMM9f( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(5)); |
|
238 |
reg_def XMM9g( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(6)); |
|
239 |
reg_def XMM9h( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(7)); |
|
30624 | 240 |
reg_def XMM9i( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(8)); |
241 |
reg_def XMM9j( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(9)); |
|
242 |
reg_def XMM9k( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(10)); |
|
243 |
reg_def XMM9l( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(11)); |
|
244 |
reg_def XMM9m( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(12)); |
|
245 |
reg_def XMM9n( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(13)); |
|
246 |
reg_def XMM9o( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(14)); |
|
247 |
reg_def XMM9p( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
248 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
249 |
reg_def XMM10 ( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()); |
13294 | 250 |
reg_def XMM10b( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(1)); |
251 |
reg_def XMM10c( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(2)); |
|
252 |
reg_def XMM10d( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(3)); |
|
253 |
reg_def XMM10e( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(4)); |
|
254 |
reg_def XMM10f( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(5)); |
|
255 |
reg_def XMM10g( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(6)); |
|
256 |
reg_def XMM10h( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(7)); |
|
30624 | 257 |
reg_def XMM10i( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(8)); |
258 |
reg_def XMM10j( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(9)); |
|
259 |
reg_def XMM10k( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(10)); |
|
260 |
reg_def XMM10l( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(11)); |
|
261 |
reg_def XMM10m( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(12)); |
|
262 |
reg_def XMM10n( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(13)); |
|
263 |
reg_def XMM10o( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(14)); |
|
264 |
reg_def XMM10p( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
265 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
266 |
reg_def XMM11 ( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()); |
13294 | 267 |
reg_def XMM11b( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(1)); |
268 |
reg_def XMM11c( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(2)); |
|
269 |
reg_def XMM11d( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(3)); |
|
270 |
reg_def XMM11e( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(4)); |
|
271 |
reg_def XMM11f( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(5)); |
|
272 |
reg_def XMM11g( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(6)); |
|
273 |
reg_def XMM11h( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(7)); |
|
30624 | 274 |
reg_def XMM11i( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(8)); |
275 |
reg_def XMM11j( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(9)); |
|
276 |
reg_def XMM11k( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(10)); |
|
277 |
reg_def XMM11l( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(11)); |
|
278 |
reg_def XMM11m( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(12)); |
|
279 |
reg_def XMM11n( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(13)); |
|
280 |
reg_def XMM11o( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(14)); |
|
281 |
reg_def XMM11p( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
282 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
283 |
reg_def XMM12 ( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()); |
13294 | 284 |
reg_def XMM12b( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(1)); |
285 |
reg_def XMM12c( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(2)); |
|
286 |
reg_def XMM12d( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(3)); |
|
287 |
reg_def XMM12e( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(4)); |
|
288 |
reg_def XMM12f( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(5)); |
|
289 |
reg_def XMM12g( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(6)); |
|
290 |
reg_def XMM12h( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(7)); |
|
30624 | 291 |
reg_def XMM12i( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(8)); |
292 |
reg_def XMM12j( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(9)); |
|
293 |
reg_def XMM12k( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(10)); |
|
294 |
reg_def XMM12l( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(11)); |
|
295 |
reg_def XMM12m( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(12)); |
|
296 |
reg_def XMM12n( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(13)); |
|
297 |
reg_def XMM12o( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(14)); |
|
298 |
reg_def XMM12p( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
299 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
300 |
reg_def XMM13 ( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()); |
13294 | 301 |
reg_def XMM13b( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(1)); |
302 |
reg_def XMM13c( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(2)); |
|
303 |
reg_def XMM13d( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(3)); |
|
304 |
reg_def XMM13e( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(4)); |
|
305 |
reg_def XMM13f( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(5)); |
|
306 |
reg_def XMM13g( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(6)); |
|
307 |
reg_def XMM13h( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(7)); |
|
30624 | 308 |
reg_def XMM13i( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(8)); |
309 |
reg_def XMM13j( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(9)); |
|
310 |
reg_def XMM13k( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(10)); |
|
311 |
reg_def XMM13l( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(11)); |
|
312 |
reg_def XMM13m( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(12)); |
|
313 |
reg_def XMM13n( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(13)); |
|
314 |
reg_def XMM13o( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(14)); |
|
315 |
reg_def XMM13p( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
316 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
317 |
reg_def XMM14 ( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()); |
13294 | 318 |
reg_def XMM14b( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(1)); |
319 |
reg_def XMM14c( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(2)); |
|
320 |
reg_def XMM14d( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(3)); |
|
321 |
reg_def XMM14e( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(4)); |
|
322 |
reg_def XMM14f( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(5)); |
|
323 |
reg_def XMM14g( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(6)); |
|
324 |
reg_def XMM14h( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(7)); |
|
30624 | 325 |
reg_def XMM14i( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(8)); |
326 |
reg_def XMM14j( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(9)); |
|
327 |
reg_def XMM14k( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(10)); |
|
328 |
reg_def XMM14l( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(11)); |
|
329 |
reg_def XMM14m( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(12)); |
|
330 |
reg_def XMM14n( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(13)); |
|
331 |
reg_def XMM14o( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(14)); |
|
332 |
reg_def XMM14p( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
333 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
334 |
reg_def XMM15 ( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()); |
13294 | 335 |
reg_def XMM15b( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(1)); |
336 |
reg_def XMM15c( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(2)); |
|
337 |
reg_def XMM15d( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(3)); |
|
338 |
reg_def XMM15e( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(4)); |
|
339 |
reg_def XMM15f( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(5)); |
|
340 |
reg_def XMM15g( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(6)); |
|
341 |
reg_def XMM15h( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(7)); |
|
30624 | 342 |
reg_def XMM15i( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(8)); |
343 |
reg_def XMM15j( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(9)); |
|
344 |
reg_def XMM15k( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(10)); |
|
345 |
reg_def XMM15l( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(11)); |
|
346 |
reg_def XMM15m( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(12)); |
|
347 |
reg_def XMM15n( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(13)); |
|
348 |
reg_def XMM15o( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(14)); |
|
349 |
reg_def XMM15p( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(15)); |
|
350 |
||
351 |
reg_def XMM16 ( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()); |
|
352 |
reg_def XMM16b( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(1)); |
|
353 |
reg_def XMM16c( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(2)); |
|
354 |
reg_def XMM16d( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(3)); |
|
355 |
reg_def XMM16e( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(4)); |
|
356 |
reg_def XMM16f( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(5)); |
|
357 |
reg_def XMM16g( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(6)); |
|
358 |
reg_def XMM16h( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(7)); |
|
359 |
reg_def XMM16i( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(8)); |
|
360 |
reg_def XMM16j( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(9)); |
|
361 |
reg_def XMM16k( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(10)); |
|
362 |
reg_def XMM16l( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(11)); |
|
363 |
reg_def XMM16m( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(12)); |
|
364 |
reg_def XMM16n( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(13)); |
|
365 |
reg_def XMM16o( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(14)); |
|
366 |
reg_def XMM16p( SOC, SOC, Op_RegF, 16, xmm16->as_VMReg()->next(15)); |
|
367 |
||
368 |
reg_def XMM17 ( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()); |
|
369 |
reg_def XMM17b( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(1)); |
|
370 |
reg_def XMM17c( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(2)); |
|
371 |
reg_def XMM17d( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(3)); |
|
372 |
reg_def XMM17e( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(4)); |
|
373 |
reg_def XMM17f( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(5)); |
|
374 |
reg_def XMM17g( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(6)); |
|
375 |
reg_def XMM17h( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(7)); |
|
376 |
reg_def XMM17i( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(8)); |
|
377 |
reg_def XMM17j( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(9)); |
|
378 |
reg_def XMM17k( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(10)); |
|
379 |
reg_def XMM17l( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(11)); |
|
380 |
reg_def XMM17m( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(12)); |
|
381 |
reg_def XMM17n( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(13)); |
|
382 |
reg_def XMM17o( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(14)); |
|
383 |
reg_def XMM17p( SOC, SOC, Op_RegF, 17, xmm17->as_VMReg()->next(15)); |
|
384 |
||
385 |
reg_def XMM18 ( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()); |
|
386 |
reg_def XMM18b( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(1)); |
|
387 |
reg_def XMM18c( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(2)); |
|
388 |
reg_def XMM18d( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(3)); |
|
389 |
reg_def XMM18e( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(4)); |
|
390 |
reg_def XMM18f( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(5)); |
|
391 |
reg_def XMM18g( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(6)); |
|
392 |
reg_def XMM18h( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(7)); |
|
393 |
reg_def XMM18i( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(8)); |
|
394 |
reg_def XMM18j( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(9)); |
|
395 |
reg_def XMM18k( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(10)); |
|
396 |
reg_def XMM18l( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(11)); |
|
397 |
reg_def XMM18m( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(12)); |
|
398 |
reg_def XMM18n( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(13)); |
|
399 |
reg_def XMM18o( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(14)); |
|
400 |
reg_def XMM18p( SOC, SOC, Op_RegF, 18, xmm18->as_VMReg()->next(15)); |
|
401 |
||
402 |
reg_def XMM19 ( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()); |
|
403 |
reg_def XMM19b( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(1)); |
|
404 |
reg_def XMM19c( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(2)); |
|
405 |
reg_def XMM19d( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(3)); |
|
406 |
reg_def XMM19e( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(4)); |
|
407 |
reg_def XMM19f( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(5)); |
|
408 |
reg_def XMM19g( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(6)); |
|
409 |
reg_def XMM19h( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(7)); |
|
410 |
reg_def XMM19i( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(8)); |
|
411 |
reg_def XMM19j( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(9)); |
|
412 |
reg_def XMM19k( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(10)); |
|
413 |
reg_def XMM19l( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(11)); |
|
414 |
reg_def XMM19m( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(12)); |
|
415 |
reg_def XMM19n( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(13)); |
|
416 |
reg_def XMM19o( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(14)); |
|
417 |
reg_def XMM19p( SOC, SOC, Op_RegF, 19, xmm19->as_VMReg()->next(15)); |
|
418 |
||
419 |
reg_def XMM20 ( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()); |
|
420 |
reg_def XMM20b( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(1)); |
|
421 |
reg_def XMM20c( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(2)); |
|
422 |
reg_def XMM20d( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(3)); |
|
423 |
reg_def XMM20e( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(4)); |
|
424 |
reg_def XMM20f( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(5)); |
|
425 |
reg_def XMM20g( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(6)); |
|
426 |
reg_def XMM20h( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(7)); |
|
427 |
reg_def XMM20i( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(8)); |
|
428 |
reg_def XMM20j( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(9)); |
|
429 |
reg_def XMM20k( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(10)); |
|
430 |
reg_def XMM20l( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(11)); |
|
431 |
reg_def XMM20m( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(12)); |
|
432 |
reg_def XMM20n( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(13)); |
|
433 |
reg_def XMM20o( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(14)); |
|
434 |
reg_def XMM20p( SOC, SOC, Op_RegF, 20, xmm20->as_VMReg()->next(15)); |
|
435 |
||
436 |
reg_def XMM21 ( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()); |
|
437 |
reg_def XMM21b( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(1)); |
|
438 |
reg_def XMM21c( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(2)); |
|
439 |
reg_def XMM21d( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(3)); |
|
440 |
reg_def XMM21e( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(4)); |
|
441 |
reg_def XMM21f( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(5)); |
|
442 |
reg_def XMM21g( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(6)); |
|
443 |
reg_def XMM21h( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(7)); |
|
444 |
reg_def XMM21i( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(8)); |
|
445 |
reg_def XMM21j( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(9)); |
|
446 |
reg_def XMM21k( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(10)); |
|
447 |
reg_def XMM21l( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(11)); |
|
448 |
reg_def XMM21m( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(12)); |
|
449 |
reg_def XMM21n( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(13)); |
|
450 |
reg_def XMM21o( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(14)); |
|
451 |
reg_def XMM21p( SOC, SOC, Op_RegF, 21, xmm21->as_VMReg()->next(15)); |
|
452 |
||
453 |
reg_def XMM22 ( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()); |
|
454 |
reg_def XMM22b( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(1)); |
|
455 |
reg_def XMM22c( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(2)); |
|
456 |
reg_def XMM22d( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(3)); |
|
457 |
reg_def XMM22e( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(4)); |
|
458 |
reg_def XMM22f( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(5)); |
|
459 |
reg_def XMM22g( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(6)); |
|
460 |
reg_def XMM22h( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(7)); |
|
461 |
reg_def XMM22i( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(8)); |
|
462 |
reg_def XMM22j( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(9)); |
|
463 |
reg_def XMM22k( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(10)); |
|
464 |
reg_def XMM22l( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(11)); |
|
465 |
reg_def XMM22m( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(12)); |
|
466 |
reg_def XMM22n( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(13)); |
|
467 |
reg_def XMM22o( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(14)); |
|
468 |
reg_def XMM22p( SOC, SOC, Op_RegF, 22, xmm22->as_VMReg()->next(15)); |
|
469 |
||
470 |
reg_def XMM23 ( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()); |
|
471 |
reg_def XMM23b( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(1)); |
|
472 |
reg_def XMM23c( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(2)); |
|
473 |
reg_def XMM23d( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(3)); |
|
474 |
reg_def XMM23e( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(4)); |
|
475 |
reg_def XMM23f( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(5)); |
|
476 |
reg_def XMM23g( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(6)); |
|
477 |
reg_def XMM23h( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(7)); |
|
478 |
reg_def XMM23i( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(8)); |
|
479 |
reg_def XMM23j( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(9)); |
|
480 |
reg_def XMM23k( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(10)); |
|
481 |
reg_def XMM23l( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(11)); |
|
482 |
reg_def XMM23m( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(12)); |
|
483 |
reg_def XMM23n( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(13)); |
|
484 |
reg_def XMM23o( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(14)); |
|
485 |
reg_def XMM23p( SOC, SOC, Op_RegF, 23, xmm23->as_VMReg()->next(15)); |
|
486 |
||
487 |
reg_def XMM24 ( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()); |
|
488 |
reg_def XMM24b( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(1)); |
|
489 |
reg_def XMM24c( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(2)); |
|
490 |
reg_def XMM24d( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(3)); |
|
491 |
reg_def XMM24e( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(4)); |
|
492 |
reg_def XMM24f( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(5)); |
|
493 |
reg_def XMM24g( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(6)); |
|
494 |
reg_def XMM24h( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(7)); |
|
495 |
reg_def XMM24i( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(8)); |
|
496 |
reg_def XMM24j( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(9)); |
|
497 |
reg_def XMM24k( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(10)); |
|
498 |
reg_def XMM24l( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(11)); |
|
499 |
reg_def XMM24m( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(12)); |
|
500 |
reg_def XMM24n( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(13)); |
|
501 |
reg_def XMM24o( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(14)); |
|
502 |
reg_def XMM24p( SOC, SOC, Op_RegF, 24, xmm24->as_VMReg()->next(15)); |
|
503 |
||
504 |
reg_def XMM25 ( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()); |
|
505 |
reg_def XMM25b( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(1)); |
|
506 |
reg_def XMM25c( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(2)); |
|
507 |
reg_def XMM25d( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(3)); |
|
508 |
reg_def XMM25e( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(4)); |
|
509 |
reg_def XMM25f( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(5)); |
|
510 |
reg_def XMM25g( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(6)); |
|
511 |
reg_def XMM25h( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(7)); |
|
512 |
reg_def XMM25i( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(8)); |
|
513 |
reg_def XMM25j( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(9)); |
|
514 |
reg_def XMM25k( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(10)); |
|
515 |
reg_def XMM25l( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(11)); |
|
516 |
reg_def XMM25m( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(12)); |
|
517 |
reg_def XMM25n( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(13)); |
|
518 |
reg_def XMM25o( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(14)); |
|
519 |
reg_def XMM25p( SOC, SOC, Op_RegF, 25, xmm25->as_VMReg()->next(15)); |
|
520 |
||
521 |
reg_def XMM26 ( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()); |
|
522 |
reg_def XMM26b( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(1)); |
|
523 |
reg_def XMM26c( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(2)); |
|
524 |
reg_def XMM26d( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(3)); |
|
525 |
reg_def XMM26e( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(4)); |
|
526 |
reg_def XMM26f( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(5)); |
|
527 |
reg_def XMM26g( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(6)); |
|
528 |
reg_def XMM26h( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(7)); |
|
529 |
reg_def XMM26i( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(8)); |
|
530 |
reg_def XMM26j( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(9)); |
|
531 |
reg_def XMM26k( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(10)); |
|
532 |
reg_def XMM26l( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(11)); |
|
533 |
reg_def XMM26m( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(12)); |
|
534 |
reg_def XMM26n( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(13)); |
|
535 |
reg_def XMM26o( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(14)); |
|
536 |
reg_def XMM26p( SOC, SOC, Op_RegF, 26, xmm26->as_VMReg()->next(15)); |
|
537 |
||
538 |
reg_def XMM27 ( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()); |
|
539 |
reg_def XMM27b( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(1)); |
|
540 |
reg_def XMM27c( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(2)); |
|
541 |
reg_def XMM27d( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(3)); |
|
542 |
reg_def XMM27e( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(4)); |
|
543 |
reg_def XMM27f( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(5)); |
|
544 |
reg_def XMM27g( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(6)); |
|
545 |
reg_def XMM27h( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(7)); |
|
546 |
reg_def XMM27i( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(8)); |
|
547 |
reg_def XMM27j( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(9)); |
|
548 |
reg_def XMM27k( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(10)); |
|
549 |
reg_def XMM27l( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(11)); |
|
550 |
reg_def XMM27m( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(12)); |
|
551 |
reg_def XMM27n( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(13)); |
|
552 |
reg_def XMM27o( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(14)); |
|
553 |
reg_def XMM27p( SOC, SOC, Op_RegF, 27, xmm27->as_VMReg()->next(15)); |
|
554 |
||
555 |
reg_def XMM28 ( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()); |
|
556 |
reg_def XMM28b( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(1)); |
|
557 |
reg_def XMM28c( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(2)); |
|
558 |
reg_def XMM28d( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(3)); |
|
559 |
reg_def XMM28e( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(4)); |
|
560 |
reg_def XMM28f( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(5)); |
|
561 |
reg_def XMM28g( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(6)); |
|
562 |
reg_def XMM28h( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(7)); |
|
563 |
reg_def XMM28i( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(8)); |
|
564 |
reg_def XMM28j( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(9)); |
|
565 |
reg_def XMM28k( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(10)); |
|
566 |
reg_def XMM28l( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(11)); |
|
567 |
reg_def XMM28m( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(12)); |
|
568 |
reg_def XMM28n( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(13)); |
|
569 |
reg_def XMM28o( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(14)); |
|
570 |
reg_def XMM28p( SOC, SOC, Op_RegF, 28, xmm28->as_VMReg()->next(15)); |
|
571 |
||
572 |
reg_def XMM29 ( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()); |
|
573 |
reg_def XMM29b( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(1)); |
|
574 |
reg_def XMM29c( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(2)); |
|
575 |
reg_def XMM29d( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(3)); |
|
576 |
reg_def XMM29e( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(4)); |
|
577 |
reg_def XMM29f( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(5)); |
|
578 |
reg_def XMM29g( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(6)); |
|
579 |
reg_def XMM29h( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(7)); |
|
580 |
reg_def XMM29i( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(8)); |
|
581 |
reg_def XMM29j( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(9)); |
|
582 |
reg_def XMM29k( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(10)); |
|
583 |
reg_def XMM29l( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(11)); |
|
584 |
reg_def XMM29m( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(12)); |
|
585 |
reg_def XMM29n( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(13)); |
|
586 |
reg_def XMM29o( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(14)); |
|
587 |
reg_def XMM29p( SOC, SOC, Op_RegF, 29, xmm29->as_VMReg()->next(15)); |
|
588 |
||
589 |
reg_def XMM30 ( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()); |
|
590 |
reg_def XMM30b( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(1)); |
|
591 |
reg_def XMM30c( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(2)); |
|
592 |
reg_def XMM30d( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(3)); |
|
593 |
reg_def XMM30e( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(4)); |
|
594 |
reg_def XMM30f( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(5)); |
|
595 |
reg_def XMM30g( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(6)); |
|
596 |
reg_def XMM30h( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(7)); |
|
597 |
reg_def XMM30i( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(8)); |
|
598 |
reg_def XMM30j( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(9)); |
|
599 |
reg_def XMM30k( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(10)); |
|
600 |
reg_def XMM30l( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(11)); |
|
601 |
reg_def XMM30m( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(12)); |
|
602 |
reg_def XMM30n( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(13)); |
|
603 |
reg_def XMM30o( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(14)); |
|
604 |
reg_def XMM30p( SOC, SOC, Op_RegF, 30, xmm30->as_VMReg()->next(15)); |
|
605 |
||
606 |
reg_def XMM31 ( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()); |
|
607 |
reg_def XMM31b( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(1)); |
|
608 |
reg_def XMM31c( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(2)); |
|
609 |
reg_def XMM31d( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(3)); |
|
610 |
reg_def XMM31e( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(4)); |
|
611 |
reg_def XMM31f( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(5)); |
|
612 |
reg_def XMM31g( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(6)); |
|
613 |
reg_def XMM31h( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(7)); |
|
614 |
reg_def XMM31i( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(8)); |
|
615 |
reg_def XMM31j( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(9)); |
|
616 |
reg_def XMM31k( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(10)); |
|
617 |
reg_def XMM31l( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(11)); |
|
618 |
reg_def XMM31m( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(12)); |
|
619 |
reg_def XMM31n( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(13)); |
|
620 |
reg_def XMM31o( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(14)); |
|
621 |
reg_def XMM31p( SOC, SOC, Op_RegF, 31, xmm31->as_VMReg()->next(15)); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
622 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
623 |
#endif // _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
624 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
625 |
#ifdef _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
626 |
reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad()); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
627 |
#else |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
628 |
reg_def RFLAGS(SOC, SOC, 0, 8, VMRegImpl::Bad()); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
629 |
#endif // _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
630 |
|
30624 | 631 |
alloc_class chunk1(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p, |
632 |
XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p, |
|
633 |
XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p, |
|
634 |
XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p, |
|
635 |
XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p, |
|
636 |
XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p, |
|
637 |
XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p, |
|
638 |
XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
639 |
#ifdef _LP64 |
30624 | 640 |
,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p, |
641 |
XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p, |
|
642 |
XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p, |
|
643 |
XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p, |
|
644 |
XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p, |
|
645 |
XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p, |
|
646 |
XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p, |
|
647 |
XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p |
|
648 |
,XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p, |
|
649 |
XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p, |
|
650 |
XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p, |
|
651 |
XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p, |
|
652 |
XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p, |
|
653 |
XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p, |
|
654 |
XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p, |
|
655 |
XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p, |
|
656 |
XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p, |
|
657 |
XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p, |
|
658 |
XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p, |
|
659 |
XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p, |
|
660 |
XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p, |
|
661 |
XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p, |
|
662 |
XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p, |
|
663 |
XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
664 |
#endif |
30624 | 665 |
); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
666 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
667 |
// flags allocation class should be last. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
668 |
alloc_class chunk2(RFLAGS); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
669 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
670 |
// Singleton class for condition codes |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
671 |
reg_class int_flags(RFLAGS); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
672 |
|
30624 | 673 |
// Class for pre evex float registers |
674 |
reg_class float_reg_legacy(XMM0, |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
675 |
XMM1, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
676 |
XMM2, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
677 |
XMM3, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
678 |
XMM4, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
679 |
XMM5, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
680 |
XMM6, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
681 |
XMM7 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
682 |
#ifdef _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
683 |
,XMM8, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
684 |
XMM9, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
685 |
XMM10, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
686 |
XMM11, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
687 |
XMM12, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
688 |
XMM13, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
689 |
XMM14, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
690 |
XMM15 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
691 |
#endif |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
692 |
); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
693 |
|
30624 | 694 |
// Class for evex float registers |
695 |
reg_class float_reg_evex(XMM0, |
|
696 |
XMM1, |
|
697 |
XMM2, |
|
698 |
XMM3, |
|
699 |
XMM4, |
|
700 |
XMM5, |
|
701 |
XMM6, |
|
702 |
XMM7 |
|
703 |
#ifdef _LP64 |
|
704 |
,XMM8, |
|
705 |
XMM9, |
|
706 |
XMM10, |
|
707 |
XMM11, |
|
708 |
XMM12, |
|
709 |
XMM13, |
|
710 |
XMM14, |
|
711 |
XMM15, |
|
712 |
XMM16, |
|
713 |
XMM17, |
|
714 |
XMM18, |
|
715 |
XMM19, |
|
716 |
XMM20, |
|
717 |
XMM21, |
|
718 |
XMM22, |
|
719 |
XMM23, |
|
720 |
XMM24, |
|
721 |
XMM25, |
|
722 |
XMM26, |
|
723 |
XMM27, |
|
724 |
XMM28, |
|
725 |
XMM29, |
|
726 |
XMM30, |
|
727 |
XMM31 |
|
728 |
#endif |
|
729 |
); |
|
730 |
||
731 |
reg_class_dynamic float_reg(float_reg_evex, float_reg_legacy, %{ VM_Version::supports_evex() %} ); |
|
51857 | 732 |
reg_class_dynamic float_reg_vl(float_reg_evex, float_reg_legacy, %{ VM_Version::supports_evex() && VM_Version::supports_avx512vl() %} ); |
30624 | 733 |
|
734 |
// Class for pre evex double registers |
|
735 |
reg_class double_reg_legacy(XMM0, XMM0b, |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
736 |
XMM1, XMM1b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
737 |
XMM2, XMM2b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
738 |
XMM3, XMM3b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
739 |
XMM4, XMM4b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
740 |
XMM5, XMM5b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
741 |
XMM6, XMM6b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
742 |
XMM7, XMM7b |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
743 |
#ifdef _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
744 |
,XMM8, XMM8b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
745 |
XMM9, XMM9b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
746 |
XMM10, XMM10b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
747 |
XMM11, XMM11b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
748 |
XMM12, XMM12b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
749 |
XMM13, XMM13b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
750 |
XMM14, XMM14b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
751 |
XMM15, XMM15b |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
752 |
#endif |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
753 |
); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
754 |
|
30624 | 755 |
// Class for evex double registers |
756 |
reg_class double_reg_evex(XMM0, XMM0b, |
|
757 |
XMM1, XMM1b, |
|
758 |
XMM2, XMM2b, |
|
759 |
XMM3, XMM3b, |
|
760 |
XMM4, XMM4b, |
|
761 |
XMM5, XMM5b, |
|
762 |
XMM6, XMM6b, |
|
763 |
XMM7, XMM7b |
|
764 |
#ifdef _LP64 |
|
765 |
,XMM8, XMM8b, |
|
766 |
XMM9, XMM9b, |
|
767 |
XMM10, XMM10b, |
|
768 |
XMM11, XMM11b, |
|
769 |
XMM12, XMM12b, |
|
770 |
XMM13, XMM13b, |
|
771 |
XMM14, XMM14b, |
|
772 |
XMM15, XMM15b, |
|
773 |
XMM16, XMM16b, |
|
774 |
XMM17, XMM17b, |
|
775 |
XMM18, XMM18b, |
|
776 |
XMM19, XMM19b, |
|
777 |
XMM20, XMM20b, |
|
778 |
XMM21, XMM21b, |
|
779 |
XMM22, XMM22b, |
|
780 |
XMM23, XMM23b, |
|
781 |
XMM24, XMM24b, |
|
782 |
XMM25, XMM25b, |
|
783 |
XMM26, XMM26b, |
|
784 |
XMM27, XMM27b, |
|
785 |
XMM28, XMM28b, |
|
786 |
XMM29, XMM29b, |
|
787 |
XMM30, XMM30b, |
|
788 |
XMM31, XMM31b |
|
789 |
#endif |
|
790 |
); |
|
791 |
||
792 |
reg_class_dynamic double_reg(double_reg_evex, double_reg_legacy, %{ VM_Version::supports_evex() %} ); |
|
51857 | 793 |
reg_class_dynamic double_reg_vl(double_reg_evex, double_reg_legacy, %{ VM_Version::supports_evex() && VM_Version::supports_avx512vl() %} ); |
30624 | 794 |
|
795 |
// Class for pre evex 32bit vector registers |
|
796 |
reg_class vectors_reg_legacy(XMM0, |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
797 |
XMM1, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
798 |
XMM2, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
799 |
XMM3, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
800 |
XMM4, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
801 |
XMM5, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
802 |
XMM6, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
803 |
XMM7 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
804 |
#ifdef _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
805 |
,XMM8, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
806 |
XMM9, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
807 |
XMM10, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
808 |
XMM11, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
809 |
XMM12, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
810 |
XMM13, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
811 |
XMM14, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
812 |
XMM15 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
813 |
#endif |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
814 |
); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
815 |
|
30624 | 816 |
// Class for evex 32bit vector registers |
817 |
reg_class vectors_reg_evex(XMM0, |
|
818 |
XMM1, |
|
819 |
XMM2, |
|
820 |
XMM3, |
|
821 |
XMM4, |
|
822 |
XMM5, |
|
823 |
XMM6, |
|
824 |
XMM7 |
|
825 |
#ifdef _LP64 |
|
826 |
,XMM8, |
|
827 |
XMM9, |
|
828 |
XMM10, |
|
829 |
XMM11, |
|
830 |
XMM12, |
|
831 |
XMM13, |
|
832 |
XMM14, |
|
833 |
XMM15, |
|
834 |
XMM16, |
|
835 |
XMM17, |
|
836 |
XMM18, |
|
837 |
XMM19, |
|
838 |
XMM20, |
|
839 |
XMM21, |
|
840 |
XMM22, |
|
841 |
XMM23, |
|
842 |
XMM24, |
|
843 |
XMM25, |
|
844 |
XMM26, |
|
845 |
XMM27, |
|
846 |
XMM28, |
|
847 |
XMM29, |
|
848 |
XMM30, |
|
849 |
XMM31 |
|
850 |
#endif |
|
851 |
); |
|
852 |
||
853 |
reg_class_dynamic vectors_reg(vectors_reg_evex, vectors_reg_legacy, %{ VM_Version::supports_evex() %} ); |
|
51857 | 854 |
reg_class_dynamic vectors_reg_vlbwdq(vectors_reg_evex, vectors_reg_legacy, %{ VM_Version::supports_avx512vlbwdq() %} ); |
30624 | 855 |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
856 |
// Class for all 64bit vector registers |
30624 | 857 |
reg_class vectord_reg_legacy(XMM0, XMM0b, |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
858 |
XMM1, XMM1b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
859 |
XMM2, XMM2b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
860 |
XMM3, XMM3b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
861 |
XMM4, XMM4b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
862 |
XMM5, XMM5b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
863 |
XMM6, XMM6b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
864 |
XMM7, XMM7b |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
865 |
#ifdef _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
866 |
,XMM8, XMM8b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
867 |
XMM9, XMM9b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
868 |
XMM10, XMM10b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
869 |
XMM11, XMM11b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
870 |
XMM12, XMM12b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
871 |
XMM13, XMM13b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
872 |
XMM14, XMM14b, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
873 |
XMM15, XMM15b |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
874 |
#endif |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
875 |
); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
876 |
|
30624 | 877 |
// Class for all 64bit vector registers |
878 |
reg_class vectord_reg_evex(XMM0, XMM0b, |
|
879 |
XMM1, XMM1b, |
|
880 |
XMM2, XMM2b, |
|
881 |
XMM3, XMM3b, |
|
882 |
XMM4, XMM4b, |
|
883 |
XMM5, XMM5b, |
|
884 |
XMM6, XMM6b, |
|
885 |
XMM7, XMM7b |
|
886 |
#ifdef _LP64 |
|
887 |
,XMM8, XMM8b, |
|
888 |
XMM9, XMM9b, |
|
889 |
XMM10, XMM10b, |
|
890 |
XMM11, XMM11b, |
|
891 |
XMM12, XMM12b, |
|
892 |
XMM13, XMM13b, |
|
893 |
XMM14, XMM14b, |
|
894 |
XMM15, XMM15b, |
|
895 |
XMM16, XMM16b, |
|
896 |
XMM17, XMM17b, |
|
897 |
XMM18, XMM18b, |
|
898 |
XMM19, XMM19b, |
|
899 |
XMM20, XMM20b, |
|
900 |
XMM21, XMM21b, |
|
901 |
XMM22, XMM22b, |
|
902 |
XMM23, XMM23b, |
|
903 |
XMM24, XMM24b, |
|
904 |
XMM25, XMM25b, |
|
905 |
XMM26, XMM26b, |
|
906 |
XMM27, XMM27b, |
|
907 |
XMM28, XMM28b, |
|
908 |
XMM29, XMM29b, |
|
909 |
XMM30, XMM30b, |
|
910 |
XMM31, XMM31b |
|
911 |
#endif |
|
912 |
); |
|
913 |
||
914 |
reg_class_dynamic vectord_reg(vectord_reg_evex, vectord_reg_legacy, %{ VM_Version::supports_evex() %} ); |
|
51857 | 915 |
reg_class_dynamic vectord_reg_vlbwdq(vectord_reg_evex, vectord_reg_legacy, %{ VM_Version::supports_avx512vlbwdq() %} ); |
30624 | 916 |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
917 |
// Class for all 128bit vector registers |
30624 | 918 |
reg_class vectorx_reg_legacy(XMM0, XMM0b, XMM0c, XMM0d, |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
919 |
XMM1, XMM1b, XMM1c, XMM1d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
920 |
XMM2, XMM2b, XMM2c, XMM2d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
921 |
XMM3, XMM3b, XMM3c, XMM3d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
922 |
XMM4, XMM4b, XMM4c, XMM4d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
923 |
XMM5, XMM5b, XMM5c, XMM5d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
924 |
XMM6, XMM6b, XMM6c, XMM6d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
925 |
XMM7, XMM7b, XMM7c, XMM7d |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
926 |
#ifdef _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
927 |
,XMM8, XMM8b, XMM8c, XMM8d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
928 |
XMM9, XMM9b, XMM9c, XMM9d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
929 |
XMM10, XMM10b, XMM10c, XMM10d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
930 |
XMM11, XMM11b, XMM11c, XMM11d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
931 |
XMM12, XMM12b, XMM12c, XMM12d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
932 |
XMM13, XMM13b, XMM13c, XMM13d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
933 |
XMM14, XMM14b, XMM14c, XMM14d, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
934 |
XMM15, XMM15b, XMM15c, XMM15d |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
935 |
#endif |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
936 |
); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
937 |
|
30624 | 938 |
// Class for all 128bit vector registers |
939 |
reg_class vectorx_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, |
|
940 |
XMM1, XMM1b, XMM1c, XMM1d, |
|
941 |
XMM2, XMM2b, XMM2c, XMM2d, |
|
942 |
XMM3, XMM3b, XMM3c, XMM3d, |
|
943 |
XMM4, XMM4b, XMM4c, XMM4d, |
|
944 |
XMM5, XMM5b, XMM5c, XMM5d, |
|
945 |
XMM6, XMM6b, XMM6c, XMM6d, |
|
946 |
XMM7, XMM7b, XMM7c, XMM7d |
|
947 |
#ifdef _LP64 |
|
948 |
,XMM8, XMM8b, XMM8c, XMM8d, |
|
949 |
XMM9, XMM9b, XMM9c, XMM9d, |
|
950 |
XMM10, XMM10b, XMM10c, XMM10d, |
|
951 |
XMM11, XMM11b, XMM11c, XMM11d, |
|
952 |
XMM12, XMM12b, XMM12c, XMM12d, |
|
953 |
XMM13, XMM13b, XMM13c, XMM13d, |
|
954 |
XMM14, XMM14b, XMM14c, XMM14d, |
|
955 |
XMM15, XMM15b, XMM15c, XMM15d, |
|
956 |
XMM16, XMM16b, XMM16c, XMM16d, |
|
957 |
XMM17, XMM17b, XMM17c, XMM17d, |
|
958 |
XMM18, XMM18b, XMM18c, XMM18d, |
|
959 |
XMM19, XMM19b, XMM19c, XMM19d, |
|
960 |
XMM20, XMM20b, XMM20c, XMM20d, |
|
961 |
XMM21, XMM21b, XMM21c, XMM21d, |
|
962 |
XMM22, XMM22b, XMM22c, XMM22d, |
|
963 |
XMM23, XMM23b, XMM23c, XMM23d, |
|
964 |
XMM24, XMM24b, XMM24c, XMM24d, |
|
965 |
XMM25, XMM25b, XMM25c, XMM25d, |
|
966 |
XMM26, XMM26b, XMM26c, XMM26d, |
|
967 |
XMM27, XMM27b, XMM27c, XMM27d, |
|
968 |
XMM28, XMM28b, XMM28c, XMM28d, |
|
969 |
XMM29, XMM29b, XMM29c, XMM29d, |
|
970 |
XMM30, XMM30b, XMM30c, XMM30d, |
|
971 |
XMM31, XMM31b, XMM31c, XMM31d |
|
972 |
#endif |
|
973 |
); |
|
974 |
||
975 |
reg_class_dynamic vectorx_reg(vectorx_reg_evex, vectorx_reg_legacy, %{ VM_Version::supports_evex() %} ); |
|
51857 | 976 |
reg_class_dynamic vectorx_reg_vlbwdq(vectorx_reg_evex, vectorx_reg_legacy, %{ VM_Version::supports_avx512vlbwdq() %} ); |
30624 | 977 |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
978 |
// Class for all 256bit vector registers |
30624 | 979 |
reg_class vectory_reg_legacy(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
980 |
XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
981 |
XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
982 |
XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
983 |
XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
984 |
XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
985 |
XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
986 |
XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
987 |
#ifdef _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
988 |
,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
989 |
XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
990 |
XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
991 |
XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
992 |
XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
993 |
XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
994 |
XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
995 |
XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
996 |
#endif |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
997 |
); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
998 |
|
30624 | 999 |
// Class for all 256bit vector registers |
1000 |
reg_class vectory_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, |
|
1001 |
XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, |
|
1002 |
XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, |
|
1003 |
XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, |
|
1004 |
XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, |
|
1005 |
XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, |
|
1006 |
XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, |
|
1007 |
XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h |
|
1008 |
#ifdef _LP64 |
|
1009 |
,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, |
|
1010 |
XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, |
|
1011 |
XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, |
|
1012 |
XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, |
|
1013 |
XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, |
|
1014 |
XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, |
|
1015 |
XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, |
|
1016 |
XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, |
|
1017 |
XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, |
|
1018 |
XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, |
|
1019 |
XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, |
|
1020 |
XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, |
|
1021 |
XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, |
|
1022 |
XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, |
|
1023 |
XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, |
|
1024 |
XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, |
|
1025 |
XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, |
|
1026 |
XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, |
|
1027 |
XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, |
|
1028 |
XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, |
|
1029 |
XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, |
|
1030 |
XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, |
|
1031 |
XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, |
|
1032 |
XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h |
|
1033 |
#endif |
|
1034 |
); |
|
1035 |
||
1036 |
reg_class_dynamic vectory_reg(vectory_reg_evex, vectory_reg_legacy, %{ VM_Version::supports_evex() %} ); |
|
51857 | 1037 |
reg_class_dynamic vectory_reg_vlbwdq(vectory_reg_evex, vectory_reg_legacy, %{ VM_Version::supports_avx512vlbwdq() %} ); |
30624 | 1038 |
|
1039 |
// Class for all 512bit vector registers |
|
51857 | 1040 |
reg_class vectorz_reg_evex(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p, |
30624 | 1041 |
XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p, |
1042 |
XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p, |
|
1043 |
XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p, |
|
1044 |
XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p, |
|
1045 |
XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p, |
|
1046 |
XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p, |
|
1047 |
XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p |
|
1048 |
#ifdef _LP64 |
|
1049 |
,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p, |
|
1050 |
XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p, |
|
1051 |
XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p, |
|
1052 |
XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p, |
|
1053 |
XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p, |
|
1054 |
XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p, |
|
1055 |
XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p, |
|
1056 |
XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p |
|
1057 |
,XMM16, XMM16b, XMM16c, XMM16d, XMM16e, XMM16f, XMM16g, XMM16h, XMM16i, XMM16j, XMM16k, XMM16l, XMM16m, XMM16n, XMM16o, XMM16p, |
|
1058 |
XMM17, XMM17b, XMM17c, XMM17d, XMM17e, XMM17f, XMM17g, XMM17h, XMM17i, XMM17j, XMM17k, XMM17l, XMM17m, XMM17n, XMM17o, XMM17p, |
|
1059 |
XMM18, XMM18b, XMM18c, XMM18d, XMM18e, XMM18f, XMM18g, XMM18h, XMM18i, XMM18j, XMM18k, XMM18l, XMM18m, XMM18n, XMM18o, XMM18p, |
|
1060 |
XMM19, XMM19b, XMM19c, XMM19d, XMM19e, XMM19f, XMM19g, XMM19h, XMM19i, XMM19j, XMM19k, XMM19l, XMM19m, XMM19n, XMM19o, XMM19p, |
|
1061 |
XMM20, XMM20b, XMM20c, XMM20d, XMM20e, XMM20f, XMM20g, XMM20h, XMM20i, XMM20j, XMM20k, XMM20l, XMM20m, XMM20n, XMM20o, XMM20p, |
|
1062 |
XMM21, XMM21b, XMM21c, XMM21d, XMM21e, XMM21f, XMM21g, XMM21h, XMM21i, XMM21j, XMM21k, XMM21l, XMM21m, XMM21n, XMM21o, XMM21p, |
|
1063 |
XMM22, XMM22b, XMM22c, XMM22d, XMM22e, XMM22f, XMM22g, XMM22h, XMM22i, XMM22j, XMM22k, XMM22l, XMM22m, XMM22n, XMM22o, XMM22p, |
|
1064 |
XMM23, XMM23b, XMM23c, XMM23d, XMM23e, XMM23f, XMM23g, XMM23h, XMM23i, XMM23j, XMM23k, XMM23l, XMM23m, XMM23n, XMM23o, XMM23p, |
|
1065 |
XMM24, XMM24b, XMM24c, XMM24d, XMM24e, XMM24f, XMM24g, XMM24h, XMM24i, XMM24j, XMM24k, XMM24l, XMM24m, XMM24n, XMM24o, XMM24p, |
|
1066 |
XMM25, XMM25b, XMM25c, XMM25d, XMM25e, XMM25f, XMM25g, XMM25h, XMM25i, XMM25j, XMM25k, XMM25l, XMM25m, XMM25n, XMM25o, XMM25p, |
|
1067 |
XMM26, XMM26b, XMM26c, XMM26d, XMM26e, XMM26f, XMM26g, XMM26h, XMM26i, XMM26j, XMM26k, XMM26l, XMM26m, XMM26n, XMM26o, XMM26p, |
|
1068 |
XMM27, XMM27b, XMM27c, XMM27d, XMM27e, XMM27f, XMM27g, XMM27h, XMM27i, XMM27j, XMM27k, XMM27l, XMM27m, XMM27n, XMM27o, XMM27p, |
|
1069 |
XMM28, XMM28b, XMM28c, XMM28d, XMM28e, XMM28f, XMM28g, XMM28h, XMM28i, XMM28j, XMM28k, XMM28l, XMM28m, XMM28n, XMM28o, XMM28p, |
|
1070 |
XMM29, XMM29b, XMM29c, XMM29d, XMM29e, XMM29f, XMM29g, XMM29h, XMM29i, XMM29j, XMM29k, XMM29l, XMM29m, XMM29n, XMM29o, XMM29p, |
|
1071 |
XMM30, XMM30b, XMM30c, XMM30d, XMM30e, XMM30f, XMM30g, XMM30h, XMM30i, XMM30j, XMM30k, XMM30l, XMM30m, XMM30n, XMM30o, XMM30p, |
|
1072 |
XMM31, XMM31b, XMM31c, XMM31d, XMM31e, XMM31f, XMM31g, XMM31h, XMM31i, XMM31j, XMM31k, XMM31l, XMM31m, XMM31n, XMM31o, XMM31p |
|
1073 |
#endif |
|
1074 |
); |
|
1075 |
||
51857 | 1076 |
// Class for restricted 512bit vector registers |
1077 |
reg_class vectorz_reg_legacy(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h, XMM0i, XMM0j, XMM0k, XMM0l, XMM0m, XMM0n, XMM0o, XMM0p, |
|
1078 |
XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h, XMM1i, XMM1j, XMM1k, XMM1l, XMM1m, XMM1n, XMM1o, XMM1p, |
|
1079 |
XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h, XMM2i, XMM2j, XMM2k, XMM2l, XMM2m, XMM2n, XMM2o, XMM2p, |
|
1080 |
XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h, XMM3i, XMM3j, XMM3k, XMM3l, XMM3m, XMM3n, XMM3o, XMM3p, |
|
1081 |
XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h, XMM4i, XMM4j, XMM4k, XMM4l, XMM4m, XMM4n, XMM4o, XMM4p, |
|
1082 |
XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h, XMM5i, XMM5j, XMM5k, XMM5l, XMM5m, XMM5n, XMM5o, XMM5p, |
|
1083 |
XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h, XMM6i, XMM6j, XMM6k, XMM6l, XMM6m, XMM6n, XMM6o, XMM6p, |
|
1084 |
XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h, XMM7i, XMM7j, XMM7k, XMM7l, XMM7m, XMM7n, XMM7o, XMM7p |
|
1085 |
#ifdef _LP64 |
|
1086 |
,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h, XMM8i, XMM8j, XMM8k, XMM8l, XMM8m, XMM8n, XMM8o, XMM8p, |
|
1087 |
XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h, XMM9i, XMM9j, XMM9k, XMM9l, XMM9m, XMM9n, XMM9o, XMM9p, |
|
1088 |
XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h, XMM10i, XMM10j, XMM10k, XMM10l, XMM10m, XMM10n, XMM10o, XMM10p, |
|
1089 |
XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h, XMM11i, XMM11j, XMM11k, XMM11l, XMM11m, XMM11n, XMM11o, XMM11p, |
|
1090 |
XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h, XMM12i, XMM12j, XMM12k, XMM12l, XMM12m, XMM12n, XMM12o, XMM12p, |
|
1091 |
XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h, XMM13i, XMM13j, XMM13k, XMM13l, XMM13m, XMM13n, XMM13o, XMM13p, |
|
1092 |
XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h, XMM14i, XMM14j, XMM14k, XMM14l, XMM14m, XMM14n, XMM14o, XMM14p, |
|
1093 |
XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h, XMM15i, XMM15j, XMM15k, XMM15l, XMM15m, XMM15n, XMM15o, XMM15p |
|
1094 |
#endif |
|
1095 |
); |
|
1096 |
||
1097 |
reg_class_dynamic vectorz_reg(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() %} ); |
|
1098 |
reg_class_dynamic vectorz_reg_vl(vectorz_reg_evex, vectorz_reg_legacy, %{ VM_Version::supports_evex() && VM_Version::supports_avx512vl() %} ); |
|
1099 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1100 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1101 |
|
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1102 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1103 |
//----------SOURCE BLOCK------------------------------------------------------- |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1104 |
// This is a block of C++ code which provides values, functions, and |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1105 |
// definitions necessary in the rest of the architecture description |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1106 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1107 |
source_hpp %{ |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1108 |
// Header information of the source block. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1109 |
// Method declarations/definitions which are used outside |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1110 |
// the ad-scope can conveniently be defined here. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1111 |
// |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1112 |
// To keep related declarations/definitions/uses close together, |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1113 |
// we switch between source %{ }% and source_hpp %{ }% freely as needed. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1114 |
|
25715
d5a8dbdc5150
8049325: Introduce and clean up umbrella headers for the files in the cpu subdirectories.
goetz
parents:
23498
diff
changeset
|
1115 |
class NativeJump; |
d5a8dbdc5150
8049325: Introduce and clean up umbrella headers for the files in the cpu subdirectories.
goetz
parents:
23498
diff
changeset
|
1116 |
|
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1117 |
class CallStubImpl { |
30211 | 1118 |
|
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1119 |
//-------------------------------------------------------------- |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1120 |
//---< Used for optimization in Compile::shorten_branches >--- |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1121 |
//-------------------------------------------------------------- |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1122 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1123 |
public: |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1124 |
// Size of call trampoline stub. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1125 |
static uint size_call_trampoline() { |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1126 |
return 0; // no call trampolines on this platform |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1127 |
} |
30211 | 1128 |
|
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1129 |
// number of relocations needed by a call trampoline stub |
30211 | 1130 |
static uint reloc_call_trampoline() { |
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1131 |
return 0; // no call trampolines on this platform |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1132 |
} |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1133 |
}; |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1134 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1135 |
class HandlerImpl { |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1136 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1137 |
public: |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1138 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1139 |
static int emit_exception_handler(CodeBuffer &cbuf); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1140 |
static int emit_deopt_handler(CodeBuffer& cbuf); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1141 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1142 |
static uint size_exception_handler() { |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1143 |
// NativeCall instruction size is the same as NativeJump. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1144 |
// exception handler starts out as jump and can be patched to |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1145 |
// a call be deoptimization. (4932387) |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1146 |
// Note that this value is also credited (in output.cpp) to |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1147 |
// the size of the code section. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1148 |
return NativeJump::instruction_size; |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1149 |
} |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1150 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1151 |
#ifdef _LP64 |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1152 |
static uint size_deopt_handler() { |
51633
21154cb84d2a
8209594: guarantee(this->is8bit(imm8)) failed: Short forward jump exceeds 8-bit offset
kvn
parents:
51078
diff
changeset
|
1153 |
// three 5 byte instructions plus one move for unreachable address. |
21154cb84d2a
8209594: guarantee(this->is8bit(imm8)) failed: Short forward jump exceeds 8-bit offset
kvn
parents:
51078
diff
changeset
|
1154 |
return 15+3; |
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1155 |
} |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1156 |
#else |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1157 |
static uint size_deopt_handler() { |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1158 |
// NativeCall instruction size is the same as NativeJump. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1159 |
// exception handler starts out as jump and can be patched to |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1160 |
// a call be deoptimization. (4932387) |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1161 |
// Note that this value is also credited (in output.cpp) to |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1162 |
// the size of the code section. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1163 |
return 5 + NativeJump::instruction_size; // pushl(); jmp; |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1164 |
} |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1165 |
#endif |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1166 |
}; |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1167 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1168 |
%} // end source_hpp |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1169 |
|
11429 | 1170 |
source %{ |
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1171 |
|
38286
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1172 |
#include "opto/addnode.hpp" |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1173 |
|
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1174 |
// Emit exception handler code. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1175 |
// Stuff framesize into a register and call a VM stub routine. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1176 |
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) { |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1177 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1178 |
// Note that the code buffer's insts_mark is always relative to insts. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1179 |
// That's why we must use the macroassembler to generate a handler. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1180 |
MacroAssembler _masm(&cbuf); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1181 |
address base = __ start_a_stub(size_exception_handler()); |
32082
2a3323e25de1
8130309: Need to bailout cleanly if creation of stubs fails when codecache is out of space
thartmann
parents:
31410
diff
changeset
|
1182 |
if (base == NULL) { |
2a3323e25de1
8130309: Need to bailout cleanly if creation of stubs fails when codecache is out of space
thartmann
parents:
31410
diff
changeset
|
1183 |
ciEnv::current()->record_failure("CodeCache is full"); |
2a3323e25de1
8130309: Need to bailout cleanly if creation of stubs fails when codecache is out of space
thartmann
parents:
31410
diff
changeset
|
1184 |
return 0; // CodeBuffer::expand failed |
2a3323e25de1
8130309: Need to bailout cleanly if creation of stubs fails when codecache is out of space
thartmann
parents:
31410
diff
changeset
|
1185 |
} |
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1186 |
int offset = __ offset(); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1187 |
__ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point())); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1188 |
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1189 |
__ end_a_stub(); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1190 |
return offset; |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1191 |
} |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1192 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1193 |
// Emit deopt handler code. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1194 |
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) { |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1195 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1196 |
// Note that the code buffer's insts_mark is always relative to insts. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1197 |
// That's why we must use the macroassembler to generate a handler. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1198 |
MacroAssembler _masm(&cbuf); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1199 |
address base = __ start_a_stub(size_deopt_handler()); |
32082
2a3323e25de1
8130309: Need to bailout cleanly if creation of stubs fails when codecache is out of space
thartmann
parents:
31410
diff
changeset
|
1200 |
if (base == NULL) { |
2a3323e25de1
8130309: Need to bailout cleanly if creation of stubs fails when codecache is out of space
thartmann
parents:
31410
diff
changeset
|
1201 |
ciEnv::current()->record_failure("CodeCache is full"); |
2a3323e25de1
8130309: Need to bailout cleanly if creation of stubs fails when codecache is out of space
thartmann
parents:
31410
diff
changeset
|
1202 |
return 0; // CodeBuffer::expand failed |
2a3323e25de1
8130309: Need to bailout cleanly if creation of stubs fails when codecache is out of space
thartmann
parents:
31410
diff
changeset
|
1203 |
} |
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1204 |
int offset = __ offset(); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1205 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1206 |
#ifdef _LP64 |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1207 |
address the_pc = (address) __ pc(); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1208 |
Label next; |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1209 |
// push a "the_pc" on the stack without destroying any registers |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1210 |
// as they all may be live. |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1211 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1212 |
// push address of "next" |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1213 |
__ call(next, relocInfo::none); // reloc none is fine since it is a disp32 |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1214 |
__ bind(next); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1215 |
// adjust it so it matches "the_pc" |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1216 |
__ subptr(Address(rsp, 0), __ offset() - offset); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1217 |
#else |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1218 |
InternalAddress here(__ pc()); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1219 |
__ pushptr(here.addr()); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1220 |
#endif |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1221 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1222 |
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); |
51633
21154cb84d2a
8209594: guarantee(this->is8bit(imm8)) failed: Short forward jump exceeds 8-bit offset
kvn
parents:
51078
diff
changeset
|
1223 |
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow %d", (__ offset() - offset)); |
23498
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1224 |
__ end_a_stub(); |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1225 |
return offset; |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1226 |
} |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1227 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1228 |
|
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1229 |
//============================================================================= |
a0e67b766e5c
8037821: Account for trampoline stubs when estimating code buffer sizes
goetz
parents:
22505
diff
changeset
|
1230 |
|
11429 | 1231 |
// Float masks come from different places depending on platform. |
1232 |
#ifdef _LP64 |
|
1233 |
static address float_signmask() { return StubRoutines::x86::float_sign_mask(); } |
|
1234 |
static address float_signflip() { return StubRoutines::x86::float_sign_flip(); } |
|
1235 |
static address double_signmask() { return StubRoutines::x86::double_sign_mask(); } |
|
1236 |
static address double_signflip() { return StubRoutines::x86::double_sign_flip(); } |
|
1237 |
#else |
|
1238 |
static address float_signmask() { return (address)float_signmask_pool; } |
|
1239 |
static address float_signflip() { return (address)float_signflip_pool; } |
|
1240 |
static address double_signmask() { return (address)double_signmask_pool; } |
|
1241 |
static address double_signflip() { return (address)double_signflip_pool; } |
|
1242 |
#endif |
|
54750 | 1243 |
static address vector_short_to_byte_mask() { return StubRoutines::x86::vector_short_to_byte_mask(); } |
1244 |
static address vector_byte_perm_mask() { return StubRoutines::x86::vector_byte_perm_mask(); } |
|
1245 |
static address vector_long_sign_mask() { return StubRoutines::x86::vector_long_sign_mask(); } |
|
1246 |
||
1247 |
//============================================================================= |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1248 |
const bool Matcher::match_rule_supported(int opcode) { |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1249 |
if (!has_match_rule(opcode)) |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1250 |
return false; |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1251 |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1252 |
bool ret_value = true; |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1253 |
switch (opcode) { |
54750 | 1254 |
case Op_AbsVL: |
1255 |
if (UseAVX < 3) |
|
1256 |
ret_value = false; |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1257 |
case Op_PopCountI: |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1258 |
case Op_PopCountL: |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1259 |
if (!UsePopCountInstruction) |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1260 |
ret_value = false; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1261 |
break; |
49384 | 1262 |
case Op_PopCountVI: |
1263 |
if (!UsePopCountInstruction || !VM_Version::supports_vpopcntdq()) |
|
1264 |
ret_value = false; |
|
1265 |
break; |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1266 |
case Op_MulVI: |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1267 |
if ((UseSSE < 4) && (UseAVX < 1)) // only with SSE4_1 or AVX |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1268 |
ret_value = false; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1269 |
break; |
30624 | 1270 |
case Op_MulVL: |
1271 |
case Op_MulReductionVL: |
|
1272 |
if (VM_Version::supports_avx512dq() == false) |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1273 |
ret_value = false; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1274 |
break; |
30211 | 1275 |
case Op_AddReductionVL: |
1276 |
if (UseAVX < 3) // only EVEX : vector connectivity becomes an issue here |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1277 |
ret_value = false; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1278 |
break; |
54750 | 1279 |
case Op_AbsVB: |
1280 |
case Op_AbsVS: |
|
1281 |
case Op_AbsVI: |
|
30211 | 1282 |
case Op_AddReductionVI: |
58643
b381e5328461
8232106: [x86] C2: SIGILL due to usage of SSSE3 instructions on processors which don't support it
mdoerr
parents:
58516
diff
changeset
|
1283 |
if (UseSSE < 3 || !VM_Version::supports_ssse3()) // requires at least SSSE3 |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1284 |
ret_value = false; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1285 |
break; |
30211 | 1286 |
case Op_MulReductionVI: |
1287 |
if (UseSSE < 4) // requires at least SSE4 |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1288 |
ret_value = false; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1289 |
break; |
30211 | 1290 |
case Op_AddReductionVF: |
1291 |
case Op_AddReductionVD: |
|
1292 |
case Op_MulReductionVF: |
|
1293 |
case Op_MulReductionVD: |
|
1294 |
if (UseSSE < 1) // requires at least SSE |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1295 |
ret_value = false; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1296 |
break; |
32723
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
1297 |
case Op_SqrtVD: |
48089
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
1298 |
case Op_SqrtVF: |
32723
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
1299 |
if (UseAVX < 1) // enabled for AVX only |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1300 |
ret_value = false; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1301 |
break; |
13886
8d82c4dfa722
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
13883
diff
changeset
|
1302 |
case Op_CompareAndSwapL: |
8d82c4dfa722
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
13883
diff
changeset
|
1303 |
#ifdef _LP64 |
8d82c4dfa722
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
13883
diff
changeset
|
1304 |
case Op_CompareAndSwapP: |
8d82c4dfa722
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
13883
diff
changeset
|
1305 |
#endif |
8d82c4dfa722
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
roland
parents:
13883
diff
changeset
|
1306 |
if (!VM_Version::supports_cx8()) |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1307 |
ret_value = false; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1308 |
break; |
48309 | 1309 |
case Op_CMoveVF: |
33469
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1310 |
case Op_CMoveVD: |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1311 |
if (UseAVX < 1 || UseAVX > 2) |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1312 |
ret_value = false; |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1313 |
break; |
35581
dd47cf4734f2
8145336: PPC64: fix string intrinsics after CompactStrings change
simonis
parents:
34162
diff
changeset
|
1314 |
case Op_StrIndexOf: |
dd47cf4734f2
8145336: PPC64: fix string intrinsics after CompactStrings change
simonis
parents:
34162
diff
changeset
|
1315 |
if (!UseSSE42Intrinsics) |
dd47cf4734f2
8145336: PPC64: fix string intrinsics after CompactStrings change
simonis
parents:
34162
diff
changeset
|
1316 |
ret_value = false; |
dd47cf4734f2
8145336: PPC64: fix string intrinsics after CompactStrings change
simonis
parents:
34162
diff
changeset
|
1317 |
break; |
dd47cf4734f2
8145336: PPC64: fix string intrinsics after CompactStrings change
simonis
parents:
34162
diff
changeset
|
1318 |
case Op_StrIndexOfChar: |
39253
bd5fe208734e
8157842: indexOfChar intrinsic is not emitted on x86
thartmann
parents:
38286
diff
changeset
|
1319 |
if (!UseSSE42Intrinsics) |
35581
dd47cf4734f2
8145336: PPC64: fix string intrinsics after CompactStrings change
simonis
parents:
34162
diff
changeset
|
1320 |
ret_value = false; |
dd47cf4734f2
8145336: PPC64: fix string intrinsics after CompactStrings change
simonis
parents:
34162
diff
changeset
|
1321 |
break; |
38017
55047d16f141
8147844: new method j.l.Runtime.onSpinWait() and the corresponding x86 hotspot instrinsic
ikrylov
parents:
36561
diff
changeset
|
1322 |
case Op_OnSpinWait: |
55047d16f141
8147844: new method j.l.Runtime.onSpinWait() and the corresponding x86 hotspot instrinsic
ikrylov
parents:
36561
diff
changeset
|
1323 |
if (VM_Version::supports_on_spin_wait() == false) |
55047d16f141
8147844: new method j.l.Runtime.onSpinWait() and the corresponding x86 hotspot instrinsic
ikrylov
parents:
36561
diff
changeset
|
1324 |
ret_value = false; |
55047d16f141
8147844: new method j.l.Runtime.onSpinWait() and the corresponding x86 hotspot instrinsic
ikrylov
parents:
36561
diff
changeset
|
1325 |
break; |
52992 | 1326 |
case Op_MulAddVS2VI: |
54750 | 1327 |
case Op_RShiftVL: |
1328 |
case Op_AbsVD: |
|
1329 |
case Op_NegVD: |
|
52992 | 1330 |
if (UseSSE < 2) |
1331 |
ret_value = false; |
|
1332 |
break; |
|
54750 | 1333 |
case Op_MulVB: |
1334 |
case Op_LShiftVB: |
|
1335 |
case Op_RShiftVB: |
|
1336 |
case Op_URShiftVB: |
|
1337 |
if (UseSSE < 4) |
|
1338 |
ret_value = false; |
|
1339 |
break; |
|
54022
ff399127078a
8217561: X86: Add floating-point Math.min/max intrinsics
bsrbnd
parents:
53639
diff
changeset
|
1340 |
#ifdef _LP64 |
ff399127078a
8217561: X86: Add floating-point Math.min/max intrinsics
bsrbnd
parents:
53639
diff
changeset
|
1341 |
case Op_MaxD: |
ff399127078a
8217561: X86: Add floating-point Math.min/max intrinsics
bsrbnd
parents:
53639
diff
changeset
|
1342 |
case Op_MaxF: |
ff399127078a
8217561: X86: Add floating-point Math.min/max intrinsics
bsrbnd
parents:
53639
diff
changeset
|
1343 |
case Op_MinD: |
ff399127078a
8217561: X86: Add floating-point Math.min/max intrinsics
bsrbnd
parents:
53639
diff
changeset
|
1344 |
case Op_MinF: |
ff399127078a
8217561: X86: Add floating-point Math.min/max intrinsics
bsrbnd
parents:
53639
diff
changeset
|
1345 |
if (UseAVX < 1) // enabled for AVX only |
ff399127078a
8217561: X86: Add floating-point Math.min/max intrinsics
bsrbnd
parents:
53639
diff
changeset
|
1346 |
ret_value = false; |
ff399127078a
8217561: X86: Add floating-point Math.min/max intrinsics
bsrbnd
parents:
53639
diff
changeset
|
1347 |
break; |
ff399127078a
8217561: X86: Add floating-point Math.min/max intrinsics
bsrbnd
parents:
53639
diff
changeset
|
1348 |
#endif |
57804 | 1349 |
case Op_CacheWB: |
1350 |
case Op_CacheWBPreSync: |
|
1351 |
case Op_CacheWBPostSync: |
|
1352 |
if (!VM_Version::supports_data_cache_line_flush()) { |
|
1353 |
ret_value = false; |
|
1354 |
} |
|
1355 |
break; |
|
58421
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
1356 |
case Op_RoundDoubleMode: |
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
1357 |
if (UseSSE < 4) |
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
1358 |
ret_value = false; |
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
1359 |
break; |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1360 |
} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1361 |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1362 |
return ret_value; // Per default match rules are supported. |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1363 |
} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
1364 |
|
34162 | 1365 |
const bool Matcher::match_rule_supported_vector(int opcode, int vlen) { |
1366 |
// identify extra cases that we might want to provide match rules for |
|
1367 |
// e.g. Op_ vector nodes and other intrinsics while guarding with vlen |
|
1368 |
bool ret_value = match_rule_supported(opcode); |
|
1369 |
if (ret_value) { |
|
1370 |
switch (opcode) { |
|
54750 | 1371 |
case Op_AbsVB: |
34162 | 1372 |
case Op_AddVB: |
1373 |
case Op_SubVB: |
|
1374 |
if ((vlen == 64) && (VM_Version::supports_avx512bw() == false)) |
|
1375 |
ret_value = false; |
|
1376 |
break; |
|
54750 | 1377 |
case Op_AbsVS: |
34162 | 1378 |
case Op_AddVS: |
1379 |
case Op_SubVS: |
|
54750 | 1380 |
case Op_MulVS: |
1381 |
case Op_LShiftVS: |
|
1382 |
case Op_RShiftVS: |
|
1383 |
case Op_URShiftVS: |
|
34162 | 1384 |
if ((vlen == 32) && (VM_Version::supports_avx512bw() == false)) |
1385 |
ret_value = false; |
|
1386 |
break; |
|
54750 | 1387 |
case Op_MulVB: |
1388 |
case Op_LShiftVB: |
|
1389 |
case Op_RShiftVB: |
|
1390 |
case Op_URShiftVB: |
|
1391 |
if ((vlen == 32 && UseAVX < 2) || |
|
1392 |
((vlen == 64) && (VM_Version::supports_avx512bw() == false))) |
|
1393 |
ret_value = false; |
|
1394 |
break; |
|
1395 |
case Op_NegVF: |
|
1396 |
if ((vlen == 16) && (VM_Version::supports_avx512dq() == false)) |
|
1397 |
ret_value = false; |
|
1398 |
break; |
|
48309 | 1399 |
case Op_CMoveVF: |
1400 |
if (vlen != 8) |
|
1401 |
ret_value = false; |
|
51078 | 1402 |
break; |
54750 | 1403 |
case Op_NegVD: |
1404 |
if ((vlen == 8) && (VM_Version::supports_avx512dq() == false)) |
|
1405 |
ret_value = false; |
|
1406 |
break; |
|
34162 | 1407 |
case Op_CMoveVD: |
1408 |
if (vlen != 4) |
|
1409 |
ret_value = false; |
|
1410 |
break; |
|
58421
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
1411 |
case Op_RoundDoubleModeV: |
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
1412 |
if (VM_Version::supports_avx() == false) |
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
1413 |
ret_value = false; |
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
1414 |
break; |
34162 | 1415 |
} |
1416 |
} |
|
1417 |
||
1418 |
return ret_value; // Per default match rules are supported. |
|
1419 |
} |
|
1420 |
||
38049 | 1421 |
const bool Matcher::has_predicated_vectors(void) { |
1422 |
bool ret_value = false; |
|
1423 |
if (UseAVX > 2) { |
|
1424 |
ret_value = VM_Version::supports_avx512vl(); |
|
1425 |
} |
|
1426 |
||
1427 |
return ret_value; |
|
1428 |
} |
|
1429 |
||
33065 | 1430 |
const int Matcher::float_pressure(int default_pressure_threshold) { |
1431 |
int float_pressure_threshold = default_pressure_threshold; |
|
1432 |
#ifdef _LP64 |
|
1433 |
if (UseAVX > 2) { |
|
1434 |
// Increase pressure threshold on machines with AVX3 which have |
|
1435 |
// 2x more XMM registers. |
|
1436 |
float_pressure_threshold = default_pressure_threshold * 2; |
|
1437 |
} |
|
1438 |
#endif |
|
1439 |
return float_pressure_threshold; |
|
1440 |
} |
|
1441 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1442 |
// Max vector size in bytes. 0 if not supported. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1443 |
const int Matcher::vector_width_in_bytes(BasicType bt) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1444 |
assert(is_java_primitive(bt), "only primitive type vectors"); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1445 |
if (UseSSE < 2) return 0; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1446 |
// SSE2 supports 128bit vectors for all types. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1447 |
// AVX2 supports 256bit vectors for all types. |
30624 | 1448 |
// AVX2/EVEX supports 512bit vectors for all types. |
1449 |
int size = (UseAVX > 1) ? (1 << UseAVX) * 8 : 16; |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1450 |
// AVX1 supports 256bit vectors only for FLOAT and DOUBLE. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1451 |
if (UseAVX > 0 && (bt == T_FLOAT || bt == T_DOUBLE)) |
30624 | 1452 |
size = (UseAVX > 2) ? 64 : 32; |
51857 | 1453 |
if (UseAVX > 2 && (bt == T_BYTE || bt == T_SHORT || bt == T_CHAR)) |
1454 |
size = (VM_Version::supports_avx512bw()) ? 64 : 32; |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1455 |
// Use flag to limit vector size. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1456 |
size = MIN2(size,(int)MaxVectorSize); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1457 |
// Minimum 2 values in vector (or 4 for bytes). |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1458 |
switch (bt) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1459 |
case T_DOUBLE: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1460 |
case T_LONG: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1461 |
if (size < 16) return 0; |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1462 |
break; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1463 |
case T_FLOAT: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1464 |
case T_INT: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1465 |
if (size < 8) return 0; |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1466 |
break; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1467 |
case T_BOOLEAN: |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1468 |
if (size < 4) return 0; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1469 |
break; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1470 |
case T_CHAR: |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1471 |
if (size < 4) return 0; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1472 |
break; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1473 |
case T_BYTE: |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1474 |
if (size < 4) return 0; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1475 |
break; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1476 |
case T_SHORT: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1477 |
if (size < 4) return 0; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1478 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1479 |
default: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1480 |
ShouldNotReachHere(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1481 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1482 |
return size; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1483 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1484 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1485 |
// Limits on vector size (number of elements) loaded into vector. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1486 |
const int Matcher::max_vector_size(const BasicType bt) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1487 |
return vector_width_in_bytes(bt)/type2aelembytes(bt); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1488 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1489 |
const int Matcher::min_vector_size(const BasicType bt) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1490 |
int max_size = max_vector_size(bt); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1491 |
// Min size which can be loaded into vector is 4 bytes. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1492 |
int size = (type2aelembytes(bt) == 1) ? 4 : 2; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1493 |
return MIN2(size,max_size); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1494 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1495 |
|
51857 | 1496 |
// Vector ideal reg corresponding to specified size in bytes |
46378 | 1497 |
const uint Matcher::vector_ideal_reg(int size) { |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1498 |
assert(MaxVectorSize >= size, ""); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1499 |
switch(size) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1500 |
case 4: return Op_VecS; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1501 |
case 8: return Op_VecD; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1502 |
case 16: return Op_VecX; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1503 |
case 32: return Op_VecY; |
30624 | 1504 |
case 64: return Op_VecZ; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1505 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1506 |
ShouldNotReachHere(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1507 |
return 0; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1508 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1509 |
|
13930 | 1510 |
// Only lowest bits of xmm reg are used for vector shift count. |
46378 | 1511 |
const uint Matcher::vector_shift_count_ideal_reg(int size) { |
13930 | 1512 |
return Op_VecS; |
1513 |
} |
|
1514 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1515 |
// x86 supports misaligned vectors store/load. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1516 |
const bool Matcher::misaligned_vectors_ok() { |
53639
da7dc9e92d91
8215483: Off heap memory accesses should be vectorized
roland
parents:
53580
diff
changeset
|
1517 |
return true; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1518 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1519 |
|
22505 | 1520 |
// x86 AES instructions are compatible with SunJCE expanded |
1521 |
// keys, hence we do not need to pass the original key to stubs |
|
1522 |
const bool Matcher::pass_original_key_for_aes() { |
|
1523 |
return false; |
|
1524 |
} |
|
1525 |
||
38286
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1526 |
|
38236
510f77046e00
8154943: AArch64: redundant address computation instructions with vectorization
roland
parents:
38049
diff
changeset
|
1527 |
// The matcher requires the type of a ConvI2L to be preserved
// (see clone_shift(), which inspects the node's long type range).
const bool Matcher::convi2l_type_required = true;
510f77046e00
8154943: AArch64: redundant address computation instructions with vectorization
roland
parents:
38049
diff
changeset
|
1528 |
|
38286
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1529 |
// Check for shift by small constant as well |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1530 |
static bool clone_shift(Node* shift, Matcher* matcher, Matcher::MStack& mstack, VectorSet& address_visited) { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1531 |
if (shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() && |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1532 |
shift->in(2)->get_int() <= 3 && |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1533 |
// Are there other uses besides address expressions? |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1534 |
!matcher->is_visited(shift)) { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1535 |
address_visited.set(shift->_idx); // Flag as address_visited |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1536 |
mstack.push(shift->in(2), Matcher::Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1537 |
Node *conv = shift->in(1); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1538 |
#ifdef _LP64 |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1539 |
// Allow Matcher to match the rule which bypass |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1540 |
// ConvI2L operation for an array index on LP64 |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1541 |
// if the index value is positive. |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1542 |
if (conv->Opcode() == Op_ConvI2L && |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1543 |
conv->as_Type()->type()->is_long()->_lo >= 0 && |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1544 |
// Are there other uses besides address expressions? |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1545 |
!matcher->is_visited(conv)) { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1546 |
address_visited.set(conv->_idx); // Flag as address_visited |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1547 |
mstack.push(conv->in(1), Matcher::Pre_Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1548 |
} else |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1549 |
#endif |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1550 |
mstack.push(conv, Matcher::Pre_Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1551 |
return true; |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1552 |
} |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1553 |
return false; |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1554 |
} |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1555 |
|
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1556 |
// Should the Matcher clone shifts on addressing modes, expecting them |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1557 |
// to be subsumed into complex addressing expressions or compute them |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1558 |
// into registers? |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1559 |
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1560 |
Node *off = m->in(AddPNode::Offset); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1561 |
if (off->is_Con()) { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1562 |
address_visited.test_set(m->_idx); // Flag as address_visited |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1563 |
Node *adr = m->in(AddPNode::Address); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1564 |
|
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1565 |
// Intel can handle 2 adds in addressing mode |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1566 |
// AtomicAdd is not an addressing expression. |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1567 |
// Cheap to find it by looking for screwy base. |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1568 |
if (adr->is_AddP() && |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1569 |
!adr->in(AddPNode::Base)->is_top() && |
53436
80b55cf3a804
8202952: C2: Unexpected dead nodes after matching
vlivanov
parents:
53336
diff
changeset
|
1570 |
LP64_ONLY( off->get_long() == (int) (off->get_long()) && ) // immL32 |
38286
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1571 |
// Are there other uses besides address expressions? |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1572 |
!is_visited(adr)) { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1573 |
address_visited.set(adr->_idx); // Flag as address_visited |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1574 |
Node *shift = adr->in(AddPNode::Offset); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1575 |
if (!clone_shift(shift, this, mstack, address_visited)) { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1576 |
mstack.push(shift, Pre_Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1577 |
} |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1578 |
mstack.push(adr->in(AddPNode::Address), Pre_Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1579 |
mstack.push(adr->in(AddPNode::Base), Pre_Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1580 |
} else { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1581 |
mstack.push(adr, Pre_Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1582 |
} |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1583 |
|
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1584 |
// Clone X+offset as it also folds into most addressing expressions |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1585 |
mstack.push(off, Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1586 |
mstack.push(m->in(AddPNode::Base), Pre_Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1587 |
return true; |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1588 |
} else if (clone_shift(off, this, mstack, address_visited)) { |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1589 |
address_visited.test_set(m->_idx); // Flag as address_visited |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1590 |
mstack.push(m->in(AddPNode::Address), Pre_Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1591 |
mstack.push(m->in(AddPNode::Base), Pre_Visit); |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1592 |
return true; |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1593 |
} |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1594 |
return false; |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1595 |
} |
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1596 |
|
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1597 |
// x86 does no platform-specific rewriting of AddP chains here; address
// folding is instead driven through clone_address_expressions() above.
void Compile::reshape_address(AddPNode* addp) {
}
0ddb6f84e138
8154826: AArch64: take advantage better of base + shifted offset addressing mode
roland
parents:
38236
diff
changeset
|
1599 |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1600 |
// Helper methods for MachSpillCopyNode::implementation(). |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1601 |
static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1602 |
int src_hi, int dst_hi, uint ireg, outputStream* st) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1603 |
// In 64-bit VM size calculation is very complex. Emitting instructions |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1604 |
// into scratch buffer is used to get size in 64-bit VM. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1605 |
LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); ) |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1606 |
assert(ireg == Op_VecS || // 32bit vector |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1607 |
(src_lo & 1) == 0 && (src_lo + 1) == src_hi && |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1608 |
(dst_lo & 1) == 0 && (dst_lo + 1) == dst_hi, |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1609 |
"no non-adjacent vector moves" ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1610 |
if (cbuf) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1611 |
MacroAssembler _masm(cbuf); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1612 |
int offset = __ offset(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1613 |
switch (ireg) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1614 |
case Op_VecS: // copy whole register |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1615 |
case Op_VecD: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1616 |
case Op_VecX: |
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1617 |
#ifndef _LP64 |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1618 |
__ movdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); |
51857 | 1619 |
#else |
1620 |
if ((UseAVX < 3) || VM_Version::supports_avx512vl()) { |
|
1621 |
__ movdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); |
|
1622 |
} else { |
|
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1623 |
__ vextractf32x4(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]), 0x0); |
51857 | 1624 |
} |
1625 |
#endif |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1626 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1627 |
case Op_VecY: |
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1628 |
#ifndef _LP64 |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1629 |
__ vmovdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); |
51857 | 1630 |
#else |
1631 |
if ((UseAVX < 3) || VM_Version::supports_avx512vl()) { |
|
1632 |
__ vmovdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo])); |
|
1633 |
} else { |
|
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1634 |
__ vextractf64x4(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]), 0x0); |
51857 | 1635 |
} |
1636 |
#endif |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1637 |
break; |
30624 | 1638 |
case Op_VecZ: |
38049 | 1639 |
__ evmovdquq(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]), 2); |
30624 | 1640 |
break; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1641 |
default: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1642 |
ShouldNotReachHere(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1643 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1644 |
int size = __ offset() - offset; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1645 |
#ifdef ASSERT |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1646 |
// VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1647 |
assert(!do_size || size == 4, "incorrect size calculattion"); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1648 |
#endif |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1649 |
return size; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1650 |
#ifndef PRODUCT |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1651 |
} else if (!do_size) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1652 |
switch (ireg) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1653 |
case Op_VecS: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1654 |
case Op_VecD: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1655 |
case Op_VecX: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1656 |
st->print("movdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1657 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1658 |
case Op_VecY: |
30624 | 1659 |
case Op_VecZ: |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1660 |
st->print("vmovdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1661 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1662 |
default: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1663 |
ShouldNotReachHere(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1664 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1665 |
#endif |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1666 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1667 |
// VEX_2bytes prefix is used if UseAVX > 0, and it takes the same 2 bytes as SIMD prefix. |
30624 | 1668 |
return (UseAVX > 2) ? 6 : 4; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1669 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1670 |
|
58516
d376d86b0a01
8230565: ZGC: Redesign C2 load barrier to expand on the MachNode level
eosterlund
parents:
58462
diff
changeset
|
1671 |
int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load, |
d376d86b0a01
8230565: ZGC: Redesign C2 load barrier to expand on the MachNode level
eosterlund
parents:
58462
diff
changeset
|
1672 |
int stack_offset, int reg, uint ireg, outputStream* st) { |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1673 |
// In 64-bit VM size calculation is very complex. Emitting instructions |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1674 |
// into scratch buffer is used to get size in 64-bit VM. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1675 |
LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); ) |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1676 |
if (cbuf) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1677 |
MacroAssembler _masm(cbuf); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1678 |
int offset = __ offset(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1679 |
if (is_load) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1680 |
switch (ireg) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1681 |
case Op_VecS: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1682 |
__ movdl(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1683 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1684 |
case Op_VecD: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1685 |
__ movq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1686 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1687 |
case Op_VecX: |
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1688 |
#ifndef _LP64 |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1689 |
__ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); |
51857 | 1690 |
#else |
1691 |
if ((UseAVX < 3) || VM_Version::supports_avx512vl()) { |
|
1692 |
__ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); |
|
1693 |
} else { |
|
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1694 |
__ vpxor(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), 2); |
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1695 |
__ vinsertf32x4(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset),0x0); |
51857 | 1696 |
} |
1697 |
#endif |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1698 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1699 |
case Op_VecY: |
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1700 |
#ifndef _LP64 |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1701 |
__ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); |
51857 | 1702 |
#else |
1703 |
if ((UseAVX < 3) || VM_Version::supports_avx512vl()) { |
|
1704 |
__ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset)); |
|
1705 |
} else { |
|
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1706 |
__ vpxor(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), 2); |
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1707 |
__ vinsertf64x4(as_XMMRegister(Matcher::_regEncode[reg]), as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset),0x0); |
51857 | 1708 |
} |
1709 |
#endif |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1710 |
break; |
30624 | 1711 |
case Op_VecZ: |
38049 | 1712 |
__ evmovdquq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset), 2); |
30624 | 1713 |
break; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1714 |
default: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1715 |
ShouldNotReachHere(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1716 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1717 |
} else { // store |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1718 |
switch (ireg) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1719 |
case Op_VecS: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1720 |
__ movdl(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1721 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1722 |
case Op_VecD: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1723 |
__ movq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1724 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1725 |
case Op_VecX: |
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1726 |
#ifndef _LP64 |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1727 |
__ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); |
51857 | 1728 |
#else |
1729 |
if ((UseAVX < 3) || VM_Version::supports_avx512vl()) { |
|
1730 |
__ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); |
|
1731 |
} |
|
1732 |
else { |
|
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1733 |
__ vextractf32x4(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]), 0x0); |
51857 | 1734 |
} |
1735 |
#endif |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1736 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1737 |
case Op_VecY: |
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1738 |
#ifndef _LP64 |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1739 |
__ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); |
51857 | 1740 |
#else |
1741 |
if ((UseAVX < 3) || VM_Version::supports_avx512vl()) { |
|
1742 |
__ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg])); |
|
1743 |
} |
|
1744 |
else { |
|
53580
6121eee15c23
8217371: Incorrect LP64 guard in x86.ad after JDK-8210764 (Update avx512 implementation)
sviswanathan
parents:
53436
diff
changeset
|
1745 |
__ vextractf64x4(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]), 0x0); |
51857 | 1746 |
} |
1747 |
#endif |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1748 |
break; |
30624 | 1749 |
case Op_VecZ: |
38049 | 1750 |
__ evmovdquq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]), 2); |
30624 | 1751 |
break; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1752 |
default: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1753 |
ShouldNotReachHere(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1754 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1755 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1756 |
int size = __ offset() - offset; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1757 |
#ifdef ASSERT |
30624 | 1758 |
int offset_size = (stack_offset == 0) ? 0 : ((stack_offset < 0x80) ? 1 : (UseAVX > 2) ? 6 : 4); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1759 |
// VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1760 |
assert(!do_size || size == (5+offset_size), "incorrect size calculattion"); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1761 |
#endif |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1762 |
return size; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1763 |
#ifndef PRODUCT |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1764 |
} else if (!do_size) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1765 |
if (is_load) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1766 |
switch (ireg) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1767 |
case Op_VecS: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1768 |
st->print("movd %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1769 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1770 |
case Op_VecD: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1771 |
st->print("movq %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1772 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1773 |
case Op_VecX: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1774 |
st->print("movdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1775 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1776 |
case Op_VecY: |
30624 | 1777 |
case Op_VecZ: |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1778 |
st->print("vmovdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1779 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1780 |
default: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1781 |
ShouldNotReachHere(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1782 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1783 |
} else { // store |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1784 |
switch (ireg) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1785 |
case Op_VecS: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1786 |
st->print("movd [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1787 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1788 |
case Op_VecD: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1789 |
st->print("movq [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1790 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1791 |
case Op_VecX: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1792 |
st->print("movdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1793 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1794 |
case Op_VecY: |
30624 | 1795 |
case Op_VecZ: |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1796 |
st->print("vmovdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1797 |
break; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1798 |
default: |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1799 |
ShouldNotReachHere(); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1800 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1801 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1802 |
#endif |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1803 |
} |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1804 |
bool is_single_byte = false; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1805 |
int vec_len = 0; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1806 |
if ((UseAVX > 2) && (stack_offset != 0)) { |
34162 | 1807 |
int tuple_type = Assembler::EVEX_FVM; |
1808 |
int input_size = Assembler::EVEX_32bit; |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1809 |
switch (ireg) { |
34162 | 1810 |
case Op_VecS: |
1811 |
tuple_type = Assembler::EVEX_T1S; |
|
1812 |
break; |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1813 |
case Op_VecD: |
34162 | 1814 |
tuple_type = Assembler::EVEX_T1S; |
1815 |
input_size = Assembler::EVEX_64bit; |
|
1816 |
break; |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1817 |
case Op_VecX: |
34162 | 1818 |
break; |
1819 |
case Op_VecY: |
|
1820 |
vec_len = 1; |
|
1821 |
break; |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1822 |
case Op_VecZ: |
34162 | 1823 |
vec_len = 2; |
1824 |
break; |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1825 |
} |
34162 | 1826 |
is_single_byte = Assembler::query_compressed_disp_byte(stack_offset, true, vec_len, tuple_type, input_size, 0); |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1827 |
} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1828 |
int offset_size = 0; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1829 |
int size = 5; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1830 |
if (UseAVX > 2 ) { |
34162 | 1831 |
if (VM_Version::supports_avx512novl() && (vec_len == 2)) { |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1832 |
offset_size = (stack_offset == 0) ? 0 : ((is_single_byte) ? 1 : 4); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1833 |
size += 2; // Need an additional two bytes for EVEX encoding |
34162 | 1834 |
} else if (VM_Version::supports_avx512novl() && (vec_len < 2)) { |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1835 |
offset_size = (stack_offset == 0) ? 0 : ((stack_offset <= 127) ? 1 : 4); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1836 |
} else { |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1837 |
offset_size = (stack_offset == 0) ? 0 : ((is_single_byte) ? 1 : 4); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1838 |
size += 2; // Need an additional two bytes for EVEX encodding |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1839 |
} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1840 |
} else { |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1841 |
offset_size = (stack_offset == 0) ? 0 : ((stack_offset <= 127) ? 1 : 4); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1842 |
} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1843 |
// VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix. |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
1844 |
return size+offset_size; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1845 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1846 |
|
40040
7644f470d923
8160425: Vectorization with signalling NaN returns wrong result
thartmann
parents:
39253
diff
changeset
|
1847 |
static inline jint replicate4_imm(int con, int width) { |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1848 |
// Load a constant of "width" (in bytes) and replicate it to fill 32bit. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1849 |
assert(width == 1 || width == 2, "only byte or short types here"); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1850 |
int bit_width = width * 8; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1851 |
jint val = con; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1852 |
val &= (1 << bit_width) - 1; // mask off sign bits |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1853 |
while(bit_width < 32) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1854 |
val |= (val << bit_width); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1855 |
bit_width <<= 1; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1856 |
} |
40040
7644f470d923
8160425: Vectorization with signalling NaN returns wrong result
thartmann
parents:
39253
diff
changeset
|
1857 |
return val; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1858 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1859 |
|
40040
7644f470d923
8160425: Vectorization with signalling NaN returns wrong result
thartmann
parents:
39253
diff
changeset
|
1860 |
static inline jlong replicate8_imm(int con, int width) { |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1861 |
// Load a constant of "width" (in bytes) and replicate it to fill 64bit. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1862 |
assert(width == 1 || width == 2 || width == 4, "only byte, short or int types here"); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1863 |
int bit_width = width * 8; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1864 |
jlong val = con; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1865 |
val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1866 |
while(bit_width < 64) { |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1867 |
val |= (val << bit_width); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1868 |
bit_width <<= 1; |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1869 |
} |
40040
7644f470d923
8160425: Vectorization with signalling NaN returns wrong result
thartmann
parents:
39253
diff
changeset
|
1870 |
return val; |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1871 |
} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1872 |
|
11794 | 1873 |
#ifndef PRODUCT |
1874 |
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const { |
|
1875 |
st->print("nop \t# %d bytes pad for loops and calls", _count); |
|
1876 |
} |
|
1877 |
#endif |
|
1878 |
||
1879 |
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const { |
|
1880 |
MacroAssembler _masm(&cbuf); |
|
1881 |
__ nop(_count); |
|
1882 |
} |
|
1883 |
||
1884 |
uint MachNopNode::size(PhaseRegAlloc*) const { |
|
1885 |
return _count; |
|
1886 |
} |
|
1887 |
||
1888 |
#ifndef PRODUCT |
|
1889 |
void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const { |
|
1890 |
st->print("# breakpoint"); |
|
1891 |
} |
|
1892 |
#endif |
|
1893 |
||
1894 |
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const { |
|
1895 |
MacroAssembler _masm(&cbuf); |
|
1896 |
__ int3(); |
|
1897 |
} |
|
1898 |
||
1899 |
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const { |
|
1900 |
return MachNode::size(ra_); |
|
1901 |
} |
|
1902 |
||
1903 |
%} |
|
1904 |
||
1905 |
encode %{ |
|
1906 |
||
1907 |
enc_class call_epilog %{ |
|
1908 |
if (VerifyStackAtCalls) { |
|
1909 |
// Check that stack depth is unchanged: find majik cookie on stack |
|
1910 |
int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word)); |
|
1911 |
MacroAssembler _masm(&cbuf); |
|
1912 |
Label L; |
|
1913 |
__ cmpptr(Address(rsp, framesize), (int32_t)0xbadb100d); |
|
1914 |
__ jccb(Assembler::equal, L); |
|
1915 |
// Die if stack mismatch |
|
1916 |
__ int3(); |
|
1917 |
__ bind(L); |
|
1918 |
} |
|
1919 |
%} |
|
1920 |
||
11429 | 1921 |
%} |
1922 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1923 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1924 |
//----------OPERANDS----------------------------------------------------------- |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1925 |
// Operand definitions must precede instruction definitions for correct parsing |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1926 |
// in the ADLC because operands constitute user defined types which are used in |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1927 |
// instruction definitions. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1928 |
|
30624 | 1929 |
operand vecZ() %{ |
1930 |
constraint(ALLOC_IN_RC(vectorz_reg)); |
|
1931 |
match(VecZ); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1932 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1933 |
format %{ %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1934 |
interface(REG_INTER); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1935 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
1936 |
|
51857 | 1937 |
operand legVecZ() %{ |
1938 |
constraint(ALLOC_IN_RC(vectorz_reg_vl)); |
|
1939 |
match(VecZ); |
|
1940 |
||
1941 |
format %{ %} |
|
1942 |
interface(REG_INTER); |
|
1943 |
%} |
|
1944 |
||
33469
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1945 |
// Comparison Code for FP conditional move |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1946 |
operand cmpOp_vcmppd() %{ |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1947 |
match(Bool); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1948 |
|
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1949 |
predicate(n->as_Bool()->_test._test != BoolTest::overflow && |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1950 |
n->as_Bool()->_test._test != BoolTest::no_overflow); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1951 |
format %{ "" %} |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1952 |
interface(COND_INTER) %{ |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1953 |
equal (0x0, "eq"); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1954 |
less (0x1, "lt"); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1955 |
less_equal (0x2, "le"); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1956 |
not_equal (0xC, "ne"); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1957 |
greater_equal(0xD, "ge"); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1958 |
greater (0xE, "gt"); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1959 |
//TODO cannot compile (adlc breaks) without two next lines with error: |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1960 |
// x86_64.ad(13987) Syntax Error: :In operand cmpOp_vcmppd: Do not support this encode constant: ' %{ |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1961 |
// equal' for overflow. |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1962 |
overflow (0x20, "o"); // not really supported by the instruction |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1963 |
no_overflow (0x21, "no"); // not really supported by the instruction |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1964 |
%} |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1965 |
%} |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1966 |
|
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
1967 |
|
11429 | 1968 |
// INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit) |
1969 |
||
11794 | 1970 |
// ============================================================================ |
1971 |
||
1972 |
instruct ShouldNotReachHere() %{ |
|
1973 |
match(Halt); |
|
46525
3a5c833a43de
8176506: C2: loop unswitching and unsafe accesses cause crash
roland
parents:
46378
diff
changeset
|
1974 |
format %{ "ud2\t# ShouldNotReachHere" %} |
3a5c833a43de
8176506: C2: loop unswitching and unsafe accesses cause crash
roland
parents:
46378
diff
changeset
|
1975 |
ins_encode %{ |
58061
fafba5cf3546
8225653: Provide more information when hitting SIGILL from HaltNode
chagedorn
parents:
57804
diff
changeset
|
1976 |
__ stop(_halt_reason); |
11794 | 1977 |
%} |
1978 |
ins_pipe(pipe_slow); |
|
1979 |
%} |
|
1980 |
||
38049 | 1981 |
// =================================EVEX special=============================== |
1982 |
||
1983 |
instruct setMask(rRegI dst, rRegI src) %{ |
|
1984 |
predicate(Matcher::has_predicated_vectors()); |
|
1985 |
match(Set dst (SetVectMaskI src)); |
|
1986 |
effect(TEMP dst); |
|
1987 |
format %{ "setvectmask $dst, $src" %} |
|
1988 |
ins_encode %{ |
|
1989 |
__ setvectmask($dst$$Register, $src$$Register); |
|
1990 |
%} |
|
1991 |
ins_pipe(pipe_slow); |
|
1992 |
%} |
|
1993 |
||
11794 | 1994 |
// ============================================================================ |
1995 |
||
11429 | 1996 |
instruct addF_reg(regF dst, regF src) %{ |
1997 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
1998 |
match(Set dst (AddF dst src)); |
|
1999 |
||
2000 |
format %{ "addss $dst, $src" %} |
|
2001 |
ins_cost(150); |
|
2002 |
ins_encode %{ |
|
2003 |
__ addss($dst$$XMMRegister, $src$$XMMRegister); |
|
2004 |
%} |
|
2005 |
ins_pipe(pipe_slow); |
|
2006 |
%} |
|
2007 |
||
2008 |
instruct addF_mem(regF dst, memory src) %{ |
|
2009 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2010 |
match(Set dst (AddF dst (LoadF src))); |
|
2011 |
||
2012 |
format %{ "addss $dst, $src" %} |
|
2013 |
ins_cost(150); |
|
2014 |
ins_encode %{ |
|
2015 |
__ addss($dst$$XMMRegister, $src$$Address); |
|
2016 |
%} |
|
2017 |
ins_pipe(pipe_slow); |
|
2018 |
%} |
|
2019 |
||
2020 |
instruct addF_imm(regF dst, immF con) %{ |
|
2021 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2022 |
match(Set dst (AddF dst con)); |
|
2023 |
format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %} |
|
2024 |
ins_cost(150); |
|
2025 |
ins_encode %{ |
|
2026 |
__ addss($dst$$XMMRegister, $constantaddress($con)); |
|
2027 |
%} |
|
2028 |
ins_pipe(pipe_slow); |
|
2029 |
%} |
|
2030 |
||
13294 | 2031 |
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ |
11429 | 2032 |
predicate(UseAVX > 0); |
2033 |
match(Set dst (AddF src1 src2)); |
|
2034 |
||
2035 |
format %{ "vaddss $dst, $src1, $src2" %} |
|
2036 |
ins_cost(150); |
|
2037 |
ins_encode %{ |
|
2038 |
__ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
|
2039 |
%} |
|
2040 |
ins_pipe(pipe_slow); |
|
2041 |
%} |
|
2042 |
||
13294 | 2043 |
instruct addF_reg_mem(regF dst, regF src1, memory src2) %{ |
11429 | 2044 |
predicate(UseAVX > 0); |
2045 |
match(Set dst (AddF src1 (LoadF src2))); |
|
2046 |
||
2047 |
format %{ "vaddss $dst, $src1, $src2" %} |
|
2048 |
ins_cost(150); |
|
2049 |
ins_encode %{ |
|
2050 |
__ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
|
2051 |
%} |
|
2052 |
ins_pipe(pipe_slow); |
|
2053 |
%} |
|
2054 |
||
13294 | 2055 |
instruct addF_reg_imm(regF dst, regF src, immF con) %{ |
11429 | 2056 |
predicate(UseAVX > 0); |
2057 |
match(Set dst (AddF src con)); |
|
2058 |
||
2059 |
format %{ "vaddss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %} |
|
2060 |
ins_cost(150); |
|
2061 |
ins_encode %{ |
|
2062 |
__ vaddss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
|
2063 |
%} |
|
2064 |
ins_pipe(pipe_slow); |
|
2065 |
%} |
|
2066 |
||
2067 |
instruct addD_reg(regD dst, regD src) %{ |
|
2068 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2069 |
match(Set dst (AddD dst src)); |
|
2070 |
||
2071 |
format %{ "addsd $dst, $src" %} |
|
2072 |
ins_cost(150); |
|
2073 |
ins_encode %{ |
|
2074 |
__ addsd($dst$$XMMRegister, $src$$XMMRegister); |
|
2075 |
%} |
|
2076 |
ins_pipe(pipe_slow); |
|
2077 |
%} |
|
2078 |
||
2079 |
instruct addD_mem(regD dst, memory src) %{ |
|
2080 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2081 |
match(Set dst (AddD dst (LoadD src))); |
|
2082 |
||
2083 |
format %{ "addsd $dst, $src" %} |
|
2084 |
ins_cost(150); |
|
2085 |
ins_encode %{ |
|
2086 |
__ addsd($dst$$XMMRegister, $src$$Address); |
|
2087 |
%} |
|
2088 |
ins_pipe(pipe_slow); |
|
2089 |
%} |
|
2090 |
||
2091 |
instruct addD_imm(regD dst, immD con) %{ |
|
2092 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2093 |
match(Set dst (AddD dst con)); |
|
2094 |
format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} |
|
2095 |
ins_cost(150); |
|
2096 |
ins_encode %{ |
|
2097 |
__ addsd($dst$$XMMRegister, $constantaddress($con)); |
|
2098 |
%} |
|
2099 |
ins_pipe(pipe_slow); |
|
2100 |
%} |
|
2101 |
||
13294 | 2102 |
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ |
11429 | 2103 |
predicate(UseAVX > 0); |
2104 |
match(Set dst (AddD src1 src2)); |
|
2105 |
||
2106 |
format %{ "vaddsd $dst, $src1, $src2" %} |
|
2107 |
ins_cost(150); |
|
2108 |
ins_encode %{ |
|
2109 |
__ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
|
2110 |
%} |
|
2111 |
ins_pipe(pipe_slow); |
|
2112 |
%} |
|
2113 |
||
13294 | 2114 |
instruct addD_reg_mem(regD dst, regD src1, memory src2) %{ |
11429 | 2115 |
predicate(UseAVX > 0); |
2116 |
match(Set dst (AddD src1 (LoadD src2))); |
|
2117 |
||
2118 |
format %{ "vaddsd $dst, $src1, $src2" %} |
|
2119 |
ins_cost(150); |
|
2120 |
ins_encode %{ |
|
2121 |
__ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
|
2122 |
%} |
|
2123 |
ins_pipe(pipe_slow); |
|
2124 |
%} |
|
2125 |
||
13294 | 2126 |
instruct addD_reg_imm(regD dst, regD src, immD con) %{ |
11429 | 2127 |
predicate(UseAVX > 0); |
2128 |
match(Set dst (AddD src con)); |
|
2129 |
||
2130 |
format %{ "vaddsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %} |
|
2131 |
ins_cost(150); |
|
2132 |
ins_encode %{ |
|
2133 |
__ vaddsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
|
2134 |
%} |
|
2135 |
ins_pipe(pipe_slow); |
|
2136 |
%} |
|
2137 |
||
2138 |
instruct subF_reg(regF dst, regF src) %{ |
|
2139 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2140 |
match(Set dst (SubF dst src)); |
|
2141 |
||
2142 |
format %{ "subss $dst, $src" %} |
|
2143 |
ins_cost(150); |
|
2144 |
ins_encode %{ |
|
2145 |
__ subss($dst$$XMMRegister, $src$$XMMRegister); |
|
2146 |
%} |
|
2147 |
ins_pipe(pipe_slow); |
|
2148 |
%} |
|
2149 |
||
2150 |
instruct subF_mem(regF dst, memory src) %{ |
|
2151 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2152 |
match(Set dst (SubF dst (LoadF src))); |
|
2153 |
||
2154 |
format %{ "subss $dst, $src" %} |
|
2155 |
ins_cost(150); |
|
2156 |
ins_encode %{ |
|
2157 |
__ subss($dst$$XMMRegister, $src$$Address); |
|
2158 |
%} |
|
2159 |
ins_pipe(pipe_slow); |
|
2160 |
%} |
|
2161 |
||
2162 |
instruct subF_imm(regF dst, immF con) %{ |
|
2163 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2164 |
match(Set dst (SubF dst con)); |
|
2165 |
format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %} |
|
2166 |
ins_cost(150); |
|
2167 |
ins_encode %{ |
|
2168 |
__ subss($dst$$XMMRegister, $constantaddress($con)); |
|
2169 |
%} |
|
2170 |
ins_pipe(pipe_slow); |
|
2171 |
%} |
|
2172 |
||
13294 | 2173 |
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ |
11429 | 2174 |
predicate(UseAVX > 0); |
2175 |
match(Set dst (SubF src1 src2)); |
|
2176 |
||
2177 |
format %{ "vsubss $dst, $src1, $src2" %} |
|
2178 |
ins_cost(150); |
|
2179 |
ins_encode %{ |
|
2180 |
__ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
|
2181 |
%} |
|
2182 |
ins_pipe(pipe_slow); |
|
2183 |
%} |
|
2184 |
||
13294 | 2185 |
instruct subF_reg_mem(regF dst, regF src1, memory src2) %{ |
11429 | 2186 |
predicate(UseAVX > 0); |
2187 |
match(Set dst (SubF src1 (LoadF src2))); |
|
2188 |
||
2189 |
format %{ "vsubss $dst, $src1, $src2" %} |
|
2190 |
ins_cost(150); |
|
2191 |
ins_encode %{ |
|
2192 |
__ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
|
2193 |
%} |
|
2194 |
ins_pipe(pipe_slow); |
|
2195 |
%} |
|
2196 |
||
13294 | 2197 |
instruct subF_reg_imm(regF dst, regF src, immF con) %{ |
11429 | 2198 |
predicate(UseAVX > 0); |
2199 |
match(Set dst (SubF src con)); |
|
2200 |
||
2201 |
format %{ "vsubss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %} |
|
2202 |
ins_cost(150); |
|
2203 |
ins_encode %{ |
|
2204 |
__ vsubss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
|
2205 |
%} |
|
2206 |
ins_pipe(pipe_slow); |
|
2207 |
%} |
|
2208 |
||
2209 |
instruct subD_reg(regD dst, regD src) %{ |
|
2210 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2211 |
match(Set dst (SubD dst src)); |
|
2212 |
||
2213 |
format %{ "subsd $dst, $src" %} |
|
2214 |
ins_cost(150); |
|
2215 |
ins_encode %{ |
|
2216 |
__ subsd($dst$$XMMRegister, $src$$XMMRegister); |
|
2217 |
%} |
|
2218 |
ins_pipe(pipe_slow); |
|
2219 |
%} |
|
2220 |
||
2221 |
instruct subD_mem(regD dst, memory src) %{ |
|
2222 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2223 |
match(Set dst (SubD dst (LoadD src))); |
|
2224 |
||
2225 |
format %{ "subsd $dst, $src" %} |
|
2226 |
ins_cost(150); |
|
2227 |
ins_encode %{ |
|
2228 |
__ subsd($dst$$XMMRegister, $src$$Address); |
|
2229 |
%} |
|
2230 |
ins_pipe(pipe_slow); |
|
2231 |
%} |
|
2232 |
||
2233 |
instruct subD_imm(regD dst, immD con) %{ |
|
2234 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2235 |
match(Set dst (SubD dst con)); |
|
2236 |
format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} |
|
2237 |
ins_cost(150); |
|
2238 |
ins_encode %{ |
|
2239 |
__ subsd($dst$$XMMRegister, $constantaddress($con)); |
|
2240 |
%} |
|
2241 |
ins_pipe(pipe_slow); |
|
2242 |
%} |
|
2243 |
||
13294 | 2244 |
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ |
11429 | 2245 |
predicate(UseAVX > 0); |
2246 |
match(Set dst (SubD src1 src2)); |
|
2247 |
||
2248 |
format %{ "vsubsd $dst, $src1, $src2" %} |
|
2249 |
ins_cost(150); |
|
2250 |
ins_encode %{ |
|
2251 |
__ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
|
2252 |
%} |
|
2253 |
ins_pipe(pipe_slow); |
|
2254 |
%} |
|
2255 |
||
13294 | 2256 |
instruct subD_reg_mem(regD dst, regD src1, memory src2) %{ |
11429 | 2257 |
predicate(UseAVX > 0); |
2258 |
match(Set dst (SubD src1 (LoadD src2))); |
|
2259 |
||
2260 |
format %{ "vsubsd $dst, $src1, $src2" %} |
|
2261 |
ins_cost(150); |
|
2262 |
ins_encode %{ |
|
2263 |
__ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
|
2264 |
%} |
|
2265 |
ins_pipe(pipe_slow); |
|
2266 |
%} |
|
2267 |
||
13294 | 2268 |
instruct subD_reg_imm(regD dst, regD src, immD con) %{ |
11429 | 2269 |
predicate(UseAVX > 0); |
2270 |
match(Set dst (SubD src con)); |
|
2271 |
||
2272 |
format %{ "vsubsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %} |
|
2273 |
ins_cost(150); |
|
2274 |
ins_encode %{ |
|
2275 |
__ vsubsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
|
2276 |
%} |
|
2277 |
ins_pipe(pipe_slow); |
|
2278 |
%} |
|
2279 |
||
2280 |
instruct mulF_reg(regF dst, regF src) %{ |
|
2281 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2282 |
match(Set dst (MulF dst src)); |
|
2283 |
||
2284 |
format %{ "mulss $dst, $src" %} |
|
2285 |
ins_cost(150); |
|
2286 |
ins_encode %{ |
|
2287 |
__ mulss($dst$$XMMRegister, $src$$XMMRegister); |
|
2288 |
%} |
|
2289 |
ins_pipe(pipe_slow); |
|
2290 |
%} |
|
2291 |
||
2292 |
instruct mulF_mem(regF dst, memory src) %{ |
|
2293 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2294 |
match(Set dst (MulF dst (LoadF src))); |
|
2295 |
||
2296 |
format %{ "mulss $dst, $src" %} |
|
2297 |
ins_cost(150); |
|
2298 |
ins_encode %{ |
|
2299 |
__ mulss($dst$$XMMRegister, $src$$Address); |
|
2300 |
%} |
|
2301 |
ins_pipe(pipe_slow); |
|
2302 |
%} |
|
2303 |
||
2304 |
instruct mulF_imm(regF dst, immF con) %{ |
|
2305 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2306 |
match(Set dst (MulF dst con)); |
|
2307 |
format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %} |
|
2308 |
ins_cost(150); |
|
2309 |
ins_encode %{ |
|
2310 |
__ mulss($dst$$XMMRegister, $constantaddress($con)); |
|
2311 |
%} |
|
2312 |
ins_pipe(pipe_slow); |
|
2313 |
%} |
|
2314 |
||
13294 | 2315 |
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ |
11429 | 2316 |
predicate(UseAVX > 0); |
2317 |
match(Set dst (MulF src1 src2)); |
|
2318 |
||
2319 |
format %{ "vmulss $dst, $src1, $src2" %} |
|
2320 |
ins_cost(150); |
|
2321 |
ins_encode %{ |
|
2322 |
__ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
|
2323 |
%} |
|
2324 |
ins_pipe(pipe_slow); |
|
2325 |
%} |
|
2326 |
||
13294 | 2327 |
instruct mulF_reg_mem(regF dst, regF src1, memory src2) %{ |
11429 | 2328 |
predicate(UseAVX > 0); |
2329 |
match(Set dst (MulF src1 (LoadF src2))); |
|
2330 |
||
2331 |
format %{ "vmulss $dst, $src1, $src2" %} |
|
2332 |
ins_cost(150); |
|
2333 |
ins_encode %{ |
|
2334 |
__ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
|
2335 |
%} |
|
2336 |
ins_pipe(pipe_slow); |
|
2337 |
%} |
|
2338 |
||
13294 | 2339 |
instruct mulF_reg_imm(regF dst, regF src, immF con) %{ |
11429 | 2340 |
predicate(UseAVX > 0); |
2341 |
match(Set dst (MulF src con)); |
|
2342 |
||
2343 |
format %{ "vmulss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %} |
|
2344 |
ins_cost(150); |
|
2345 |
ins_encode %{ |
|
2346 |
__ vmulss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
|
2347 |
%} |
|
2348 |
ins_pipe(pipe_slow); |
|
2349 |
%} |
|
2350 |
||
2351 |
instruct mulD_reg(regD dst, regD src) %{ |
|
2352 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2353 |
match(Set dst (MulD dst src)); |
|
2354 |
||
2355 |
format %{ "mulsd $dst, $src" %} |
|
2356 |
ins_cost(150); |
|
2357 |
ins_encode %{ |
|
2358 |
__ mulsd($dst$$XMMRegister, $src$$XMMRegister); |
|
2359 |
%} |
|
2360 |
ins_pipe(pipe_slow); |
|
2361 |
%} |
|
2362 |
||
2363 |
instruct mulD_mem(regD dst, memory src) %{ |
|
2364 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2365 |
match(Set dst (MulD dst (LoadD src))); |
|
2366 |
||
2367 |
format %{ "mulsd $dst, $src" %} |
|
2368 |
ins_cost(150); |
|
2369 |
ins_encode %{ |
|
2370 |
__ mulsd($dst$$XMMRegister, $src$$Address); |
|
2371 |
%} |
|
2372 |
ins_pipe(pipe_slow); |
|
2373 |
%} |
|
2374 |
||
2375 |
instruct mulD_imm(regD dst, immD con) %{ |
|
2376 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2377 |
match(Set dst (MulD dst con)); |
|
2378 |
format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} |
|
2379 |
ins_cost(150); |
|
2380 |
ins_encode %{ |
|
2381 |
__ mulsd($dst$$XMMRegister, $constantaddress($con)); |
|
2382 |
%} |
|
2383 |
ins_pipe(pipe_slow); |
|
2384 |
%} |
|
2385 |
||
13294 | 2386 |
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ |
11429 | 2387 |
predicate(UseAVX > 0); |
2388 |
match(Set dst (MulD src1 src2)); |
|
2389 |
||
2390 |
format %{ "vmulsd $dst, $src1, $src2" %} |
|
2391 |
ins_cost(150); |
|
2392 |
ins_encode %{ |
|
2393 |
__ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
|
2394 |
%} |
|
2395 |
ins_pipe(pipe_slow); |
|
2396 |
%} |
|
2397 |
||
13294 | 2398 |
instruct mulD_reg_mem(regD dst, regD src1, memory src2) %{ |
11429 | 2399 |
predicate(UseAVX > 0); |
2400 |
match(Set dst (MulD src1 (LoadD src2))); |
|
2401 |
||
2402 |
format %{ "vmulsd $dst, $src1, $src2" %} |
|
2403 |
ins_cost(150); |
|
2404 |
ins_encode %{ |
|
2405 |
__ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
|
2406 |
%} |
|
2407 |
ins_pipe(pipe_slow); |
|
2408 |
%} |
|
2409 |
||
13294 | 2410 |
instruct mulD_reg_imm(regD dst, regD src, immD con) %{ |
11429 | 2411 |
predicate(UseAVX > 0); |
2412 |
match(Set dst (MulD src con)); |
|
2413 |
||
2414 |
format %{ "vmulsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %} |
|
2415 |
ins_cost(150); |
|
2416 |
ins_encode %{ |
|
2417 |
__ vmulsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
|
2418 |
%} |
|
2419 |
ins_pipe(pipe_slow); |
|
2420 |
%} |
|
2421 |
||
2422 |
instruct divF_reg(regF dst, regF src) %{ |
|
2423 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2424 |
match(Set dst (DivF dst src)); |
|
2425 |
||
2426 |
format %{ "divss $dst, $src" %} |
|
2427 |
ins_cost(150); |
|
2428 |
ins_encode %{ |
|
2429 |
__ divss($dst$$XMMRegister, $src$$XMMRegister); |
|
2430 |
%} |
|
2431 |
ins_pipe(pipe_slow); |
|
2432 |
%} |
|
2433 |
||
2434 |
instruct divF_mem(regF dst, memory src) %{ |
|
2435 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2436 |
match(Set dst (DivF dst (LoadF src))); |
|
2437 |
||
2438 |
format %{ "divss $dst, $src" %} |
|
2439 |
ins_cost(150); |
|
2440 |
ins_encode %{ |
|
2441 |
__ divss($dst$$XMMRegister, $src$$Address); |
|
2442 |
%} |
|
2443 |
ins_pipe(pipe_slow); |
|
2444 |
%} |
|
2445 |
||
2446 |
instruct divF_imm(regF dst, immF con) %{ |
|
2447 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2448 |
match(Set dst (DivF dst con)); |
|
2449 |
format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %} |
|
2450 |
ins_cost(150); |
|
2451 |
ins_encode %{ |
|
2452 |
__ divss($dst$$XMMRegister, $constantaddress($con)); |
|
2453 |
%} |
|
2454 |
ins_pipe(pipe_slow); |
|
2455 |
%} |
|
2456 |
||
13294 | 2457 |
instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ |
11429 | 2458 |
predicate(UseAVX > 0); |
2459 |
match(Set dst (DivF src1 src2)); |
|
2460 |
||
2461 |
format %{ "vdivss $dst, $src1, $src2" %} |
|
2462 |
ins_cost(150); |
|
2463 |
ins_encode %{ |
|
2464 |
__ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
|
2465 |
%} |
|
2466 |
ins_pipe(pipe_slow); |
|
2467 |
%} |
|
2468 |
||
13294 | 2469 |
instruct divF_reg_mem(regF dst, regF src1, memory src2) %{ |
11429 | 2470 |
predicate(UseAVX > 0); |
2471 |
match(Set dst (DivF src1 (LoadF src2))); |
|
2472 |
||
2473 |
format %{ "vdivss $dst, $src1, $src2" %} |
|
2474 |
ins_cost(150); |
|
2475 |
ins_encode %{ |
|
2476 |
__ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
|
2477 |
%} |
|
2478 |
ins_pipe(pipe_slow); |
|
2479 |
%} |
|
2480 |
||
13294 | 2481 |
instruct divF_reg_imm(regF dst, regF src, immF con) %{ |
11429 | 2482 |
predicate(UseAVX > 0); |
2483 |
match(Set dst (DivF src con)); |
|
2484 |
||
2485 |
format %{ "vdivss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %} |
|
2486 |
ins_cost(150); |
|
2487 |
ins_encode %{ |
|
2488 |
__ vdivss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
|
2489 |
%} |
|
2490 |
ins_pipe(pipe_slow); |
|
2491 |
%} |
|
2492 |
||
2493 |
instruct divD_reg(regD dst, regD src) %{ |
|
2494 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2495 |
match(Set dst (DivD dst src)); |
|
2496 |
||
2497 |
format %{ "divsd $dst, $src" %} |
|
2498 |
ins_cost(150); |
|
2499 |
ins_encode %{ |
|
2500 |
__ divsd($dst$$XMMRegister, $src$$XMMRegister); |
|
2501 |
%} |
|
2502 |
ins_pipe(pipe_slow); |
|
2503 |
%} |
|
2504 |
||
2505 |
instruct divD_mem(regD dst, memory src) %{ |
|
2506 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2507 |
match(Set dst (DivD dst (LoadD src))); |
|
2508 |
||
2509 |
format %{ "divsd $dst, $src" %} |
|
2510 |
ins_cost(150); |
|
2511 |
ins_encode %{ |
|
2512 |
__ divsd($dst$$XMMRegister, $src$$Address); |
|
2513 |
%} |
|
2514 |
ins_pipe(pipe_slow); |
|
2515 |
%} |
|
2516 |
||
2517 |
instruct divD_imm(regD dst, immD con) %{ |
|
2518 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2519 |
match(Set dst (DivD dst con)); |
|
2520 |
format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} |
|
2521 |
ins_cost(150); |
|
2522 |
ins_encode %{ |
|
2523 |
__ divsd($dst$$XMMRegister, $constantaddress($con)); |
|
2524 |
%} |
|
2525 |
ins_pipe(pipe_slow); |
|
2526 |
%} |
|
2527 |
||
13294 | 2528 |
instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ |
11429 | 2529 |
predicate(UseAVX > 0); |
2530 |
match(Set dst (DivD src1 src2)); |
|
2531 |
||
2532 |
format %{ "vdivsd $dst, $src1, $src2" %} |
|
2533 |
ins_cost(150); |
|
2534 |
ins_encode %{ |
|
2535 |
__ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister); |
|
2536 |
%} |
|
2537 |
ins_pipe(pipe_slow); |
|
2538 |
%} |
|
2539 |
||
13294 | 2540 |
instruct divD_reg_mem(regD dst, regD src1, memory src2) %{ |
11429 | 2541 |
predicate(UseAVX > 0); |
2542 |
match(Set dst (DivD src1 (LoadD src2))); |
|
2543 |
||
2544 |
format %{ "vdivsd $dst, $src1, $src2" %} |
|
2545 |
ins_cost(150); |
|
2546 |
ins_encode %{ |
|
2547 |
__ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address); |
|
2548 |
%} |
|
2549 |
ins_pipe(pipe_slow); |
|
2550 |
%} |
|
2551 |
||
13294 | 2552 |
instruct divD_reg_imm(regD dst, regD src, immD con) %{ |
11429 | 2553 |
predicate(UseAVX > 0); |
2554 |
match(Set dst (DivD src con)); |
|
2555 |
||
2556 |
format %{ "vdivsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %} |
|
2557 |
ins_cost(150); |
|
2558 |
ins_encode %{ |
|
2559 |
__ vdivsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con)); |
|
2560 |
%} |
|
2561 |
ins_pipe(pipe_slow); |
|
2562 |
%} |
|
2563 |
||
2564 |
instruct absF_reg(regF dst) %{ |
|
2565 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2566 |
match(Set dst (AbsF dst)); |
|
2567 |
ins_cost(150); |
|
2568 |
format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %} |
|
2569 |
ins_encode %{ |
|
2570 |
__ andps($dst$$XMMRegister, ExternalAddress(float_signmask())); |
|
2571 |
%} |
|
2572 |
ins_pipe(pipe_slow); |
|
2573 |
%} |
|
2574 |
||
51857 | 2575 |
instruct absF_reg_reg(vlRegF dst, vlRegF src) %{ |
2576 |
predicate(UseAVX > 0); |
|
11429 | 2577 |
match(Set dst (AbsF src)); |
2578 |
ins_cost(150); |
|
2579 |
format %{ "vandps $dst, $src, [0x7fffffff]\t# abs float by sign masking" %} |
|
2580 |
ins_encode %{ |
|
30624 | 2581 |
int vector_len = 0; |
11429 | 2582 |
__ vandps($dst$$XMMRegister, $src$$XMMRegister, |
30624 | 2583 |
ExternalAddress(float_signmask()), vector_len); |
11429 | 2584 |
%} |
2585 |
ins_pipe(pipe_slow); |
|
2586 |
%} |
|
2587 |
||
2588 |
instruct absD_reg(regD dst) %{ |
|
2589 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2590 |
match(Set dst (AbsD dst)); |
|
2591 |
ins_cost(150); |
|
2592 |
format %{ "andpd $dst, [0x7fffffffffffffff]\t" |
|
2593 |
"# abs double by sign masking" %} |
|
2594 |
ins_encode %{ |
|
2595 |
__ andpd($dst$$XMMRegister, ExternalAddress(double_signmask())); |
|
2596 |
%} |
|
2597 |
ins_pipe(pipe_slow); |
|
2598 |
%} |
|
2599 |
||
51857 | 2600 |
instruct absD_reg_reg(vlRegD dst, vlRegD src) %{ |
2601 |
predicate(UseAVX > 0); |
|
11429 | 2602 |
match(Set dst (AbsD src)); |
2603 |
ins_cost(150); |
|
2604 |
format %{ "vandpd $dst, $src, [0x7fffffffffffffff]\t" |
|
2605 |
"# abs double by sign masking" %} |
|
2606 |
ins_encode %{ |
|
30624 | 2607 |
int vector_len = 0; |
11429 | 2608 |
__ vandpd($dst$$XMMRegister, $src$$XMMRegister, |
30624 | 2609 |
ExternalAddress(double_signmask()), vector_len); |
11429 | 2610 |
%} |
2611 |
ins_pipe(pipe_slow); |
|
2612 |
%} |
|
2613 |
||
2614 |
instruct negF_reg(regF dst) %{ |
|
2615 |
predicate((UseSSE>=1) && (UseAVX == 0)); |
|
2616 |
match(Set dst (NegF dst)); |
|
2617 |
ins_cost(150); |
|
2618 |
format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %} |
|
2619 |
ins_encode %{ |
|
2620 |
__ xorps($dst$$XMMRegister, ExternalAddress(float_signflip())); |
|
2621 |
%} |
|
2622 |
ins_pipe(pipe_slow); |
|
2623 |
%} |
|
2624 |
||
51857 | 2625 |
instruct negF_reg_reg(vlRegF dst, vlRegF src) %{ |
11429 | 2626 |
predicate(UseAVX > 0); |
2627 |
match(Set dst (NegF src)); |
|
2628 |
ins_cost(150); |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
2629 |
format %{ "vnegatess $dst, $src, [0x80000000]\t# neg float by sign flipping" %} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
2630 |
ins_encode %{ |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
2631 |
__ vnegatess($dst$$XMMRegister, $src$$XMMRegister, |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
2632 |
ExternalAddress(float_signflip())); |
11429 | 2633 |
%} |
2634 |
ins_pipe(pipe_slow); |
|
2635 |
%} |
|
2636 |
||
2637 |
instruct negD_reg(regD dst) %{ |
|
2638 |
predicate((UseSSE>=2) && (UseAVX == 0)); |
|
2639 |
match(Set dst (NegD dst)); |
|
2640 |
ins_cost(150); |
|
2641 |
format %{ "xorpd $dst, [0x8000000000000000]\t" |
|
2642 |
"# neg double by sign flipping" %} |
|
2643 |
ins_encode %{ |
|
2644 |
__ xorpd($dst$$XMMRegister, ExternalAddress(double_signflip())); |
|
2645 |
%} |
|
2646 |
ins_pipe(pipe_slow); |
|
2647 |
%} |
|
2648 |
||
51857 | 2649 |
instruct negD_reg_reg(vlRegD dst, vlRegD src) %{ |
11429 | 2650 |
predicate(UseAVX > 0); |
2651 |
match(Set dst (NegD src)); |
|
2652 |
ins_cost(150); |
|
51857 | 2653 |
format %{ "vnegatesd $dst, $src, [0x8000000000000000]\t" |
11429 | 2654 |
"# neg double by sign flipping" %} |
2655 |
ins_encode %{ |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
2656 |
__ vnegatesd($dst$$XMMRegister, $src$$XMMRegister, |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
2657 |
ExternalAddress(double_signflip())); |
11429 | 2658 |
%} |
2659 |
ins_pipe(pipe_slow); |
|
2660 |
%} |
|
2661 |
||
2662 |
instruct sqrtF_reg(regF dst, regF src) %{ |
|
2663 |
predicate(UseSSE>=1); |
|
48089
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
2664 |
match(Set dst (SqrtF src)); |
11429 | 2665 |
|
2666 |
format %{ "sqrtss $dst, $src" %} |
|
2667 |
ins_cost(150); |
|
2668 |
ins_encode %{ |
|
2669 |
__ sqrtss($dst$$XMMRegister, $src$$XMMRegister); |
|
2670 |
%} |
|
2671 |
ins_pipe(pipe_slow); |
|
2672 |
%} |
|
2673 |
||
2674 |
instruct sqrtF_mem(regF dst, memory src) %{ |
|
2675 |
predicate(UseSSE>=1); |
|
48089
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
2676 |
match(Set dst (SqrtF (LoadF src))); |
11429 | 2677 |
|
2678 |
format %{ "sqrtss $dst, $src" %} |
|
2679 |
ins_cost(150); |
|
2680 |
ins_encode %{ |
|
2681 |
__ sqrtss($dst$$XMMRegister, $src$$Address); |
|
2682 |
%} |
|
2683 |
ins_pipe(pipe_slow); |
|
2684 |
%} |
|
2685 |
||
2686 |
instruct sqrtF_imm(regF dst, immF con) %{ |
|
2687 |
predicate(UseSSE>=1); |
|
48089
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
2688 |
match(Set dst (SqrtF con)); |
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
2689 |
|
11429 | 2690 |
format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %} |
2691 |
ins_cost(150); |
|
2692 |
ins_encode %{ |
|
2693 |
__ sqrtss($dst$$XMMRegister, $constantaddress($con)); |
|
2694 |
%} |
|
2695 |
ins_pipe(pipe_slow); |
|
2696 |
%} |
|
2697 |
||
2698 |
instruct sqrtD_reg(regD dst, regD src) %{ |
|
2699 |
predicate(UseSSE>=2); |
|
2700 |
match(Set dst (SqrtD src)); |
|
2701 |
||
2702 |
format %{ "sqrtsd $dst, $src" %} |
|
2703 |
ins_cost(150); |
|
2704 |
ins_encode %{ |
|
2705 |
__ sqrtsd($dst$$XMMRegister, $src$$XMMRegister); |
|
2706 |
%} |
|
2707 |
ins_pipe(pipe_slow); |
|
2708 |
%} |
|
2709 |
||
2710 |
instruct sqrtD_mem(regD dst, memory src) %{ |
|
2711 |
predicate(UseSSE>=2); |
|
2712 |
match(Set dst (SqrtD (LoadD src))); |
|
2713 |
||
2714 |
format %{ "sqrtsd $dst, $src" %} |
|
2715 |
ins_cost(150); |
|
2716 |
ins_encode %{ |
|
2717 |
__ sqrtsd($dst$$XMMRegister, $src$$Address); |
|
2718 |
%} |
|
2719 |
ins_pipe(pipe_slow); |
|
2720 |
%} |
|
2721 |
||
2722 |
instruct sqrtD_imm(regD dst, immD con) %{ |
|
2723 |
predicate(UseSSE>=2); |
|
2724 |
match(Set dst (SqrtD con)); |
|
2725 |
format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} |
|
2726 |
ins_cost(150); |
|
2727 |
ins_encode %{ |
|
2728 |
__ sqrtsd($dst$$XMMRegister, $constantaddress($con)); |
|
2729 |
%} |
|
2730 |
ins_pipe(pipe_slow); |
|
2731 |
%} |
|
2732 |
||
58421
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2733 |
|
58450
67b3480882b4
8231713: x86_32 build failures after JDK-8226721 (Missing intrinsics for Math.ceil, floor, rint)
shade
parents:
58421
diff
changeset
|
2734 |
#ifdef _LP64 |
58421
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2735 |
// Scalar double rounding (Math.ceil/floor/rint), register source.
// rmode is an 8-bit immediate selecting the ROUNDSD rounding mode.
// Requires SSE4.1 (predicate UseSSE>=4).
instruct roundD_reg(legRegD dst, legRegD src, immU8 rmode) %{
  predicate(UseSSE>=4);
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "roundsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ roundsd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant);
  %}
  ins_pipe(pipe_slow);
%}
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2745 |
|
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2746 |
// Scalar double rounding with the LoadD folded into ROUNDSD's memory operand.
// Requires SSE4.1.
instruct roundD_mem(legRegD dst, memory src, immU8 rmode) %{
  predicate(UseSSE>=4);
  match(Set dst (RoundDoubleMode (LoadD src) rmode));
  format %{ "roundsd $dst, $src" %}
  ins_cost(150);
  ins_encode %{
    __ roundsd($dst$$XMMRegister, $src$$Address, $rmode$$constant);
  %}
  ins_pipe(pipe_slow);
%}
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2756 |
|
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2757 |
// Scalar double rounding of a compile-time constant taken from the constant
// table. scratch_reg is a TEMP integer register used by the macro-assembler
// helper to address the constant. Requires SSE4.1.
instruct roundD_imm(legRegD dst, immD con, immU8 rmode, rRegI scratch_reg) %{
  predicate(UseSSE>=4);
  match(Set dst (RoundDoubleMode con rmode));
  effect(TEMP scratch_reg);
  format %{ "roundsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150);
  ins_encode %{
    __ roundsd($dst$$XMMRegister, $constantaddress($con), $rmode$$constant, $scratch_reg$$Register);
  %}
  ins_pipe(pipe_slow);
%}
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2768 |
|
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2769 |
// Vector rounding of 2 packed doubles (VROUNDPD xmm), AVX only.
// vector_len == 0 selects the 128-bit encoding.
instruct vround2D_reg(legVecX dst, legVecX src, immU8 rmode) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (RoundDoubleModeV src rmode));
  format %{ "vroundpd $dst, $src, $rmode\t! round packed2D" %}
  ins_encode %{
    int vector_len = 0;
    __ vroundpd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2779 |
|
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2780 |
// Vector rounding of 2 packed doubles with the vector load folded into
// VROUNDPD's memory operand. AVX only, 128-bit encoding.
instruct vround2D_mem(legVecX dst, memory mem, immU8 rmode) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (RoundDoubleModeV (LoadVector mem) rmode));
  format %{ "vroundpd $dst, $mem, $rmode\t! round packed2D" %}
  ins_encode %{
    int vector_len = 0;
    __ vroundpd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2790 |
|
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2791 |
// Vector rounding of 4 packed doubles (VROUNDPD ymm), AVX only.
// vector_len == 1 selects the 256-bit encoding.
//
// FIX: rmode was declared as `legVecY` (a vector register operand class),
// but the encoder uses $rmode$$constant, which is only valid for an
// immediate operand; the matched ideal subtree also supplies an 8-bit
// rounding-mode constant. Declare it `immU8` to match vround2D_reg,
// vround4D_mem and vround8D_reg.
instruct vround4D_reg(legVecY dst, legVecY src, immU8 rmode) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (RoundDoubleModeV src rmode));
  format %{ "vroundpd $dst, $src, $rmode\t! round packed4D" %}
  ins_encode %{
    int vector_len = 1;
    __ vroundpd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2801 |
|
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2802 |
// Vector rounding of 4 packed doubles with the vector load folded into
// VROUNDPD's memory operand. AVX only, 256-bit encoding (vector_len == 1).
instruct vround4D_mem(legVecY dst, memory mem, immU8 rmode) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (RoundDoubleModeV (LoadVector mem) rmode));
  format %{ "vroundpd $dst, $mem, $rmode\t! round packed4D" %}
  ins_encode %{
    int vector_len = 1;
    __ vroundpd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2812 |
|
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2813 |
|
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2814 |
// Vector rounding of 8 packed doubles. Uses VRNDSCALEPD (AVX-512,
// predicate UseAVX > 2) with the full-width encoding (vector_len == 2).
instruct vround8D_reg(vecZ dst, vecZ src, immU8 rmode) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (RoundDoubleModeV src rmode));
  format %{ "vrndscalepd $dst, $src, $rmode\t! round packed8D" %}
  ins_encode %{
    int vector_len = 2;
    __ vrndscalepd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2824 |
|
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2825 |
// Vector rounding of 8 packed doubles with the vector load folded into
// VRNDSCALEPD's memory operand. AVX-512 only (UseAVX > 2).
instruct vround8D_mem(vecZ dst, memory mem, immU8 rmode) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (RoundDoubleModeV (LoadVector mem) rmode));
  format %{ "vrndscalepd $dst, $mem, $rmode\t! round packed8D" %}
  ins_encode %{
    int vector_len = 2;
    __ vrndscalepd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
58450
67b3480882b4
8231713: x86_32 build failures after JDK-8226721 (Missing intrinsics for Math.ceil, floor, rint)
shade
parents:
58421
diff
changeset
|
2835 |
#endif // _LP64 |
58421
6fc57e391539
8226721: Missing intrinsics for Math.ceil, floor, rint
neliasso
parents:
58061
diff
changeset
|
2836 |
|
38017
55047d16f141
8147844: new method j.l.Runtime.onSpinWait() and the corresponding x86 hotspot instrinsic
ikrylov
parents:
36561
diff
changeset
|
2837 |
// Runtime.onSpinWait() intrinsic: emits a single PAUSE instruction as a
// spin-loop hint. No operands, no effects.
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(200);

  format %{
    $$template
    $$emit$$"pause\t! membar_onspinwait"
  %}
  ins_encode %{
    __ pause();
  %}
  ins_pipe(pipe_slow);
%}
55047d16f141
8147844: new method j.l.Runtime.onSpinWait() and the corresponding x86 hotspot instrinsic
ikrylov
parents:
36561
diff
changeset
|
2850 |
|
41323 | 2851 |
// Fused multiply-add, double precision: c = a * b + c.
// Note: the result register is c (the accumulator), not a separate dst.
// Gated on the UseFMA flag.
instruct fmaD_reg(regD a, regD b, regD c) %{
  predicate(UseFMA);
  match(Set c (FmaD c (Binary a b)));
  format %{ "fmasd $a,$b,$c\t# $c = $a * $b + $c" %}
  ins_cost(150);
  ins_encode %{
    __ fmad($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
2862 |
||
2863 |
// Fused multiply-add, single precision: c = a * b + c.
// Mirrors fmaD_reg; gated on the UseFMA flag.
instruct fmaF_reg(regF a, regF b, regF c) %{
  predicate(UseFMA);
  match(Set c (FmaF c (Binary a b)));
  format %{ "fmass $a,$b,$c\t# $c = $a * $b + $c" %}
  ins_cost(150);
  ins_encode %{
    __ fmaf($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
2874 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2875 |
// ====================VECTOR INSTRUCTIONS===================================== |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2876 |
|
51857 | 2877 |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2878 |
// Load vectors (4 bytes long) |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2879 |
// Load a 4-byte vector from memory into an XMM register via MOVD.
instruct loadV4(vecS dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "movd $dst,$mem\t! load vector (4 bytes)" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2889 |
|
51857 | 2890 |
// Load vectors (4 bytes long)
// Register-to-register move of a 4-byte vector from the vecS class into the
// legacy (legVecS) register class. NOTE(review): despite the "load vector"
// format text, this is a reg-reg MOVSS, not a memory load.
instruct MoveVecS2Leg(legVecS dst, vecS src) %{
  match(Set dst src);
  format %{ "movss $dst,$src\t! load vector (4 bytes)" %}
  ins_encode %{
    __ movflt($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}
|
2899 |
||
2900 |
// Load vectors (4 bytes long)
// Reverse of MoveVecS2Leg: move a 4-byte vector from a legacy register back
// into the vecS class (reg-reg MOVSS; not a memory load).
instruct MoveLeg2VecS(vecS dst, legVecS src) %{
  match(Set dst src);
  format %{ "movss $dst,$src\t! load vector (4 bytes)" %}
  ins_encode %{
    __ movflt($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}
|
2909 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2910 |
// Load vectors (8 bytes long)
// Load an 8-byte vector from memory into an XMM register via MOVQ.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "movq $dst,$mem\t! load vector (8 bytes)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2921 |
|
51857 | 2922 |
// Load vectors (8 bytes long)
// Reg-reg move of an 8-byte vector into the legacy (legVecD) register class
// via MOVSD (not a memory load, despite the format text).
instruct MoveVecD2Leg(legVecD dst, vecD src) %{
  match(Set dst src);
  format %{ "movsd $dst,$src\t! load vector (8 bytes)" %}
  ins_encode %{
    __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}
|
2931 |
||
2932 |
// Load vectors (8 bytes long)
// Reverse of MoveVecD2Leg: reg-reg MOVSD from a legacy register back into
// the vecD class.
instruct MoveLeg2VecD(vecD dst, legVecD src) %{
  match(Set dst src);
  format %{ "movsd $dst,$src\t! load vector (8 bytes)" %}
  ins_encode %{
    __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}
|
2941 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2942 |
// Load vectors (16 bytes long)
// Load a 16-byte vector from memory into an XMM register via MOVDQU
// (unaligned load).
instruct loadV16(vecX dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "movdqu $dst,$mem\t! load vector (16 bytes)" %}
  ins_encode %{
    __ movdqu($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2953 |
|
51857 | 2954 |
// Load vectors (16 bytes long)
// Reg-reg move of a 16-byte vector into the legacy register class.
// On AVX-512 parts without AVX512VL, 128-bit moves of high registers are not
// encodable, so the full-width EVEX move (evmovdquq, vector_len == 2) is
// used instead of MOVDQU (see JDK-8215888).
instruct MoveVecX2Leg(legVecX dst, vecX src) %{
  match(Set dst src);
  format %{ "movdqu $dst,$src\t! load vector (16 bytes)" %}
  ins_encode %{
    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
      int vector_len = 2;
      __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
    } else {
      __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
    }
  %}
  ins_pipe( fpu_reg_reg );
%}
|
2968 |
||
2969 |
// Load vectors (16 bytes long)
// Reverse of MoveVecX2Leg: reg-reg move from a legacy register back into
// the vecX class, with the same AVX-512-without-VL fallback to the
// full-width EVEX move.
instruct MoveLeg2VecX(vecX dst, legVecX src) %{
  match(Set dst src);
  format %{ "movdqu $dst,$src\t! load vector (16 bytes)" %}
  ins_encode %{
    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
      int vector_len = 2;
      __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
    } else {
      __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
    }
  %}
  ins_pipe( fpu_reg_reg );
%}
|
2983 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2984 |
// Load vectors (32 bytes long)
// Load a 32-byte vector from memory into a YMM register via VMOVDQU
// (unaligned load).
instruct loadV32(vecY dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 32);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "vmovdqu $dst,$mem\t! load vector (32 bytes)" %}
  ins_encode %{
    __ vmovdqu($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
2995 |
|
51857 | 2996 |
// Load vectors (32 bytes long)
// Reg-reg move of a 32-byte vector into the legacy register class.
// Falls back to the full-width EVEX move on AVX-512 parts without AVX512VL,
// where 256-bit moves of high registers are not encodable (JDK-8215888).
instruct MoveVecY2Leg(legVecY dst, vecY src) %{
  match(Set dst src);
  format %{ "vmovdqu $dst,$src\t! load vector (32 bytes)" %}
  ins_encode %{
    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
      int vector_len = 2;
      __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
    } else {
      __ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);
    }
  %}
  ins_pipe( fpu_reg_reg );
%}
|
3010 |
||
3011 |
// Load vectors (32 bytes long)
// Reverse of MoveVecY2Leg: reg-reg move from a legacy register back into
// the vecY class, with the same AVX-512-without-VL fallback.
instruct MoveLeg2VecY(vecY dst, legVecY src) %{
  match(Set dst src);
  format %{ "vmovdqu $dst,$src\t! load vector (32 bytes)" %}
  ins_encode %{
    if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
      int vector_len = 2;
      __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
    } else {
      __ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);
    }
  %}
  ins_pipe( fpu_reg_reg );
%}
|
3025 |
||
30624 | 3026 |
// Load vectors (64 bytes long)
// 64-byte vector load for element sizes <= 4 bytes: uses the dword-granular
// EVEX move (evmovdqul) with the full-width encoding (vector_len == 2).
instruct loadV64_dword(vecZ dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 64 && n->as_LoadVector()->element_size() <= 4);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "vmovdqul $dst k0,$mem\t! load vector (64 bytes)" %}
  ins_encode %{
    int vector_len = 2;
    __ evmovdqul($dst$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
3038 |
||
38049 | 3039 |
// Load vectors (64 bytes long) |
3040 |
instruct loadV64_qword(vecZ dst, memory mem) %{ |
|
3041 |
predicate(n->as_LoadVector()->memory_size() == 64 && n->as_LoadVector()->element_size() > 4); |
|
3042 |
match(Set dst (LoadVector mem)); |
|
3043 |
ins_cost(125); |
|
3044 |
format %{ "vmovdquq $dst k0,$mem\t! load vector (64 bytes)" %} |
|
3045 |
ins_encode %{ |
|
3046 |
int vector_len = 2; |
|
3047 |
__ evmovdquq($dst$$XMMRegister, $mem$$Address, vector_len); |
|
3048 |
%} |
|
3049 |
ins_pipe( pipe_slow ); |
|
3050 |
%} |
|
3051 |
||
51857 | 3052 |
instruct MoveVecZ2Leg(legVecZ dst, vecZ src) %{ |
3053 |
match(Set dst src); |
|
3054 |
format %{ "vmovdquq $dst k0,$src\t! Move vector (64 bytes)" %} |
|
3055 |
ins_encode %{ |
|
3056 |
int vector_len = 2; |
|
3057 |
__ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
|
3058 |
%} |
|
3059 |
ins_pipe( fpu_reg_reg ); |
|
3060 |
%} |
|
3061 |
||
3062 |
instruct MoveLeg2VecZ(vecZ dst, legVecZ src) %{ |
|
3063 |
match(Set dst src); |
|
3064 |
format %{ "vmovdquq $dst k0,$src\t! Move vector (64 bytes)" %} |
|
3065 |
ins_encode %{ |
|
3066 |
int vector_len = 2; |
|
3067 |
__ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
|
3068 |
%} |
|
3069 |
ins_pipe( fpu_reg_reg ); |
|
3070 |
%} |
|
3071 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3072 |
// Store vectors |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3073 |
instruct storeV4(memory mem, vecS src) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3074 |
predicate(n->as_StoreVector()->memory_size() == 4); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3075 |
match(Set mem (StoreVector mem src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3076 |
ins_cost(145); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3077 |
format %{ "movd $mem,$src\t! store vector (4 bytes)" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3078 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3079 |
__ movdl($mem$$Address, $src$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3080 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3081 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3082 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3083 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3084 |
instruct storeV8(memory mem, vecD src) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3085 |
predicate(n->as_StoreVector()->memory_size() == 8); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3086 |
match(Set mem (StoreVector mem src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3087 |
ins_cost(145); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3088 |
format %{ "movq $mem,$src\t! store vector (8 bytes)" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3089 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3090 |
__ movq($mem$$Address, $src$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3091 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3092 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3093 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3094 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3095 |
instruct storeV16(memory mem, vecX src) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3096 |
predicate(n->as_StoreVector()->memory_size() == 16); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3097 |
match(Set mem (StoreVector mem src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3098 |
ins_cost(145); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3099 |
format %{ "movdqu $mem,$src\t! store vector (16 bytes)" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3100 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3101 |
__ movdqu($mem$$Address, $src$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3102 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3103 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3104 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3105 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3106 |
instruct storeV32(memory mem, vecY src) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3107 |
predicate(n->as_StoreVector()->memory_size() == 32); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3108 |
match(Set mem (StoreVector mem src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3109 |
ins_cost(145); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3110 |
format %{ "vmovdqu $mem,$src\t! store vector (32 bytes)" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3111 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3112 |
__ vmovdqu($mem$$Address, $src$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3113 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3114 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3115 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3116 |
|
38049 | 3117 |
instruct storeV64_dword(memory mem, vecZ src) %{ |
3118 |
predicate(n->as_StoreVector()->memory_size() == 64 && n->as_StoreVector()->element_size() <= 4); |
|
30624 | 3119 |
match(Set mem (StoreVector mem src)); |
3120 |
ins_cost(145); |
|
38049 | 3121 |
format %{ "vmovdqul $mem k0,$src\t! store vector (64 bytes)" %} |
30624 | 3122 |
ins_encode %{ |
3123 |
int vector_len = 2; |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3124 |
__ evmovdqul($mem$$Address, $src$$XMMRegister, vector_len); |
30624 | 3125 |
%} |
3126 |
ins_pipe( pipe_slow ); |
|
3127 |
%} |
|
3128 |
||
38049 | 3129 |
instruct storeV64_qword(memory mem, vecZ src) %{ |
3130 |
predicate(n->as_StoreVector()->memory_size() == 64 && n->as_StoreVector()->element_size() > 4); |
|
3131 |
match(Set mem (StoreVector mem src)); |
|
3132 |
ins_cost(145); |
|
3133 |
format %{ "vmovdquq $mem k0,$src\t! store vector (64 bytes)" %} |
|
3134 |
ins_encode %{ |
|
3135 |
int vector_len = 2; |
|
3136 |
__ evmovdquq($mem$$Address, $src$$XMMRegister, vector_len); |
|
3137 |
%} |
|
3138 |
ins_pipe( pipe_slow ); |
|
3139 |
%} |
|
3140 |
||
31410 | 3141 |
// ====================LEGACY REPLICATE======================================= |
3142 |
||
3143 |
instruct Repl16B(vecX dst, rRegI src) %{ |
|
3144 |
predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw()); |
|
3145 |
match(Set dst (ReplicateB src)); |
|
3146 |
format %{ "movd $dst,$src\n\t" |
|
3147 |
"punpcklbw $dst,$dst\n\t" |
|
3148 |
"pshuflw $dst,$dst,0x00\n\t" |
|
3149 |
"punpcklqdq $dst,$dst\t! replicate16B" %} |
|
3150 |
ins_encode %{ |
|
3151 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3152 |
__ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); |
|
3153 |
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
|
3154 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3155 |
%} |
|
3156 |
ins_pipe( pipe_slow ); |
|
3157 |
%} |
|
3158 |
||
3159 |
instruct Repl32B(vecY dst, rRegI src) %{ |
|
3160 |
predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw()); |
|
3161 |
match(Set dst (ReplicateB src)); |
|
3162 |
format %{ "movd $dst,$src\n\t" |
|
3163 |
"punpcklbw $dst,$dst\n\t" |
|
3164 |
"pshuflw $dst,$dst,0x00\n\t" |
|
3165 |
"punpcklqdq $dst,$dst\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3166 |
"vinserti128_high $dst,$dst\t! replicate32B" %} |
31410 | 3167 |
ins_encode %{ |
3168 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3169 |
__ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); |
|
3170 |
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
|
3171 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3172 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3173 |
%} |
3174 |
ins_pipe( pipe_slow ); |
|
3175 |
%} |
|
3176 |
||
51857 | 3177 |
instruct Repl64B(legVecZ dst, rRegI src) %{ |
3178 |
predicate(n->as_Vector()->length() == 64 && !VM_Version::supports_avx512vlbw()); |
|
3179 |
match(Set dst (ReplicateB src)); |
|
3180 |
format %{ "movd $dst,$src\n\t" |
|
3181 |
"punpcklbw $dst,$dst\n\t" |
|
3182 |
"pshuflw $dst,$dst,0x00\n\t" |
|
3183 |
"punpcklqdq $dst,$dst\n\t" |
|
3184 |
"vinserti128_high $dst,$dst\t" |
|
3185 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate64B" %} |
|
3186 |
ins_encode %{ |
|
3187 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3188 |
__ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); |
|
3189 |
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
|
3190 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3191 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3192 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3193 |
%} |
|
3194 |
ins_pipe( pipe_slow ); |
|
3195 |
%} |
|
3196 |
||
31410 | 3197 |
instruct Repl16B_imm(vecX dst, immI con) %{ |
3198 |
predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw()); |
|
3199 |
match(Set dst (ReplicateB con)); |
|
3200 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
3201 |
"punpcklqdq $dst,$dst\t! replicate16B($con)" %} |
|
3202 |
ins_encode %{ |
|
3203 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); |
|
3204 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3205 |
%} |
|
3206 |
ins_pipe( pipe_slow ); |
|
3207 |
%} |
|
3208 |
||
3209 |
instruct Repl32B_imm(vecY dst, immI con) %{ |
|
3210 |
predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw()); |
|
3211 |
match(Set dst (ReplicateB con)); |
|
3212 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
3213 |
"punpcklqdq $dst,$dst\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3214 |
"vinserti128_high $dst,$dst\t! lreplicate32B($con)" %} |
31410 | 3215 |
ins_encode %{ |
3216 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); |
|
3217 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3218 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3219 |
%} |
3220 |
ins_pipe( pipe_slow ); |
|
3221 |
%} |
|
3222 |
||
51857 | 3223 |
instruct Repl64B_imm(legVecZ dst, immI con) %{ |
3224 |
predicate(n->as_Vector()->length() == 64 && !VM_Version::supports_avx512vlbw()); |
|
3225 |
match(Set dst (ReplicateB con)); |
|
3226 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
3227 |
"punpcklqdq $dst,$dst\n\t" |
|
3228 |
"vinserti128_high $dst,$dst\t" |
|
3229 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate64B($con)" %} |
|
3230 |
ins_encode %{ |
|
3231 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); |
|
3232 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3233 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3234 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3235 |
%} |
|
3236 |
ins_pipe( pipe_slow ); |
|
3237 |
%} |
|
3238 |
||
31410 | 3239 |
instruct Repl4S(vecD dst, rRegI src) %{ |
3240 |
predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vlbw()); |
|
3241 |
match(Set dst (ReplicateS src)); |
|
3242 |
format %{ "movd $dst,$src\n\t" |
|
3243 |
"pshuflw $dst,$dst,0x00\t! replicate4S" %} |
|
3244 |
ins_encode %{ |
|
3245 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3246 |
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
|
3247 |
%} |
|
3248 |
ins_pipe( pipe_slow ); |
|
3249 |
%} |
|
3250 |
||
3251 |
instruct Repl4S_mem(vecD dst, memory mem) %{ |
|
3252 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vlbw()); |
|
3253 |
match(Set dst (ReplicateS (LoadS mem))); |
|
3254 |
format %{ "pshuflw $dst,$mem,0x00\t! replicate4S" %} |
|
3255 |
ins_encode %{ |
|
3256 |
__ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00); |
|
3257 |
%} |
|
3258 |
ins_pipe( pipe_slow ); |
|
3259 |
%} |
|
3260 |
||
3261 |
instruct Repl8S(vecX dst, rRegI src) %{ |
|
3262 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vlbw()); |
|
3263 |
match(Set dst (ReplicateS src)); |
|
3264 |
format %{ "movd $dst,$src\n\t" |
|
3265 |
"pshuflw $dst,$dst,0x00\n\t" |
|
3266 |
"punpcklqdq $dst,$dst\t! replicate8S" %} |
|
3267 |
ins_encode %{ |
|
3268 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3269 |
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
|
3270 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3271 |
%} |
|
3272 |
ins_pipe( pipe_slow ); |
|
3273 |
%} |
|
3274 |
||
3275 |
instruct Repl8S_mem(vecX dst, memory mem) %{ |
|
3276 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 0 && !VM_Version::supports_avx512vlbw()); |
|
3277 |
match(Set dst (ReplicateS (LoadS mem))); |
|
3278 |
format %{ "pshuflw $dst,$mem,0x00\n\t" |
|
3279 |
"punpcklqdq $dst,$dst\t! replicate8S" %} |
|
3280 |
ins_encode %{ |
|
3281 |
__ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00); |
|
3282 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3283 |
%} |
|
3284 |
ins_pipe( pipe_slow ); |
|
3285 |
%} |
|
3286 |
||
3287 |
instruct Repl8S_imm(vecX dst, immI con) %{ |
|
3288 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vlbw()); |
|
3289 |
match(Set dst (ReplicateS con)); |
|
3290 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
3291 |
"punpcklqdq $dst,$dst\t! replicate8S($con)" %} |
|
3292 |
ins_encode %{ |
|
3293 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); |
|
3294 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3295 |
%} |
|
3296 |
ins_pipe( pipe_slow ); |
|
3297 |
%} |
|
3298 |
||
3299 |
instruct Repl16S(vecY dst, rRegI src) %{ |
|
3300 |
predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw()); |
|
3301 |
match(Set dst (ReplicateS src)); |
|
3302 |
format %{ "movd $dst,$src\n\t" |
|
3303 |
"pshuflw $dst,$dst,0x00\n\t" |
|
3304 |
"punpcklqdq $dst,$dst\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3305 |
"vinserti128_high $dst,$dst\t! replicate16S" %} |
31410 | 3306 |
ins_encode %{ |
3307 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3308 |
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
|
3309 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3310 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3311 |
%} |
3312 |
ins_pipe( pipe_slow ); |
|
3313 |
%} |
|
3314 |
||
3315 |
instruct Repl16S_mem(vecY dst, memory mem) %{ |
|
3316 |
predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw()); |
|
3317 |
match(Set dst (ReplicateS (LoadS mem))); |
|
3318 |
format %{ "pshuflw $dst,$mem,0x00\n\t" |
|
3319 |
"punpcklqdq $dst,$dst\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3320 |
"vinserti128_high $dst,$dst\t! replicate16S" %} |
31410 | 3321 |
ins_encode %{ |
3322 |
__ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00); |
|
3323 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3324 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3325 |
%} |
3326 |
ins_pipe( pipe_slow ); |
|
3327 |
%} |
|
3328 |
||
3329 |
instruct Repl16S_imm(vecY dst, immI con) %{ |
|
3330 |
predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw()); |
|
3331 |
match(Set dst (ReplicateS con)); |
|
3332 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
3333 |
"punpcklqdq $dst,$dst\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3334 |
"vinserti128_high $dst,$dst\t! replicate16S($con)" %} |
31410 | 3335 |
ins_encode %{ |
3336 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); |
|
3337 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3338 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3339 |
%} |
3340 |
ins_pipe( pipe_slow ); |
|
3341 |
%} |
|
3342 |
||
51857 | 3343 |
instruct Repl32S(legVecZ dst, rRegI src) %{ |
3344 |
predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw()); |
|
3345 |
match(Set dst (ReplicateS src)); |
|
3346 |
format %{ "movd $dst,$src\n\t" |
|
3347 |
"pshuflw $dst,$dst,0x00\n\t" |
|
3348 |
"punpcklqdq $dst,$dst\n\t" |
|
3349 |
"vinserti128_high $dst,$dst\t" |
|
3350 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate32S" %} |
|
3351 |
ins_encode %{ |
|
3352 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3353 |
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
|
3354 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3355 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3356 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3357 |
%} |
|
3358 |
ins_pipe( pipe_slow ); |
|
3359 |
%} |
|
3360 |
||
3361 |
instruct Repl32S_mem(legVecZ dst, memory mem) %{ |
|
3362 |
predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw()); |
|
3363 |
match(Set dst (ReplicateS (LoadS mem))); |
|
3364 |
format %{ "pshuflw $dst,$mem,0x00\n\t" |
|
3365 |
"punpcklqdq $dst,$dst\n\t" |
|
3366 |
"vinserti128_high $dst,$dst\t" |
|
3367 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate32S" %} |
|
3368 |
ins_encode %{ |
|
3369 |
__ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00); |
|
3370 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3371 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3372 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3373 |
%} |
|
3374 |
ins_pipe( pipe_slow ); |
|
3375 |
%} |
|
3376 |
||
3377 |
instruct Repl32S_imm(legVecZ dst, immI con) %{ |
|
3378 |
predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw()); |
|
3379 |
match(Set dst (ReplicateS con)); |
|
3380 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
3381 |
"punpcklqdq $dst,$dst\n\t" |
|
3382 |
"vinserti128_high $dst,$dst\t" |
|
3383 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate32S($con)" %} |
|
3384 |
ins_encode %{ |
|
3385 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); |
|
3386 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3387 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3388 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3389 |
%} |
|
3390 |
ins_pipe( pipe_slow ); |
|
3391 |
%} |
|
3392 |
||
31410 | 3393 |
instruct Repl4I(vecX dst, rRegI src) %{ |
3394 |
predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); |
|
3395 |
match(Set dst (ReplicateI src)); |
|
3396 |
format %{ "movd $dst,$src\n\t" |
|
3397 |
"pshufd $dst,$dst,0x00\t! replicate4I" %} |
|
3398 |
ins_encode %{ |
|
3399 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3400 |
__ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
|
3401 |
%} |
|
3402 |
ins_pipe( pipe_slow ); |
|
3403 |
%} |
|
3404 |
||
3405 |
instruct Repl4I_mem(vecX dst, memory mem) %{ |
|
3406 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vl()); |
|
3407 |
match(Set dst (ReplicateI (LoadI mem))); |
|
3408 |
format %{ "pshufd $dst,$mem,0x00\t! replicate4I" %} |
|
3409 |
ins_encode %{ |
|
3410 |
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); |
|
3411 |
%} |
|
3412 |
ins_pipe( pipe_slow ); |
|
3413 |
%} |
|
3414 |
||
3415 |
instruct Repl8I(vecY dst, rRegI src) %{ |
|
3416 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); |
|
3417 |
match(Set dst (ReplicateI src)); |
|
3418 |
format %{ "movd $dst,$src\n\t" |
|
3419 |
"pshufd $dst,$dst,0x00\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3420 |
"vinserti128_high $dst,$dst\t! replicate8I" %} |
31410 | 3421 |
ins_encode %{ |
3422 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3423 |
__ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3424 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3425 |
%} |
3426 |
ins_pipe( pipe_slow ); |
|
3427 |
%} |
|
3428 |
||
3429 |
instruct Repl8I_mem(vecY dst, memory mem) %{ |
|
3430 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); |
|
3431 |
match(Set dst (ReplicateI (LoadI mem))); |
|
3432 |
format %{ "pshufd $dst,$mem,0x00\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3433 |
"vinserti128_high $dst,$dst\t! replicate8I" %} |
31410 | 3434 |
ins_encode %{ |
3435 |
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3436 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3437 |
%} |
3438 |
ins_pipe( pipe_slow ); |
|
3439 |
%} |
|
3440 |
||
51857 | 3441 |
instruct Repl16I(legVecZ dst, rRegI src) %{ |
3442 |
predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl()); |
|
3443 |
match(Set dst (ReplicateI src)); |
|
3444 |
format %{ "movd $dst,$src\n\t" |
|
3445 |
"pshufd $dst,$dst,0x00\n\t" |
|
3446 |
"vinserti128_high $dst,$dst\t" |
|
3447 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate16I" %} |
|
3448 |
ins_encode %{ |
|
3449 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3450 |
__ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
|
3451 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3452 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3453 |
%} |
|
3454 |
ins_pipe( pipe_slow ); |
|
3455 |
%} |
|
3456 |
||
3457 |
instruct Repl16I_mem(legVecZ dst, memory mem) %{ |
|
3458 |
predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl()); |
|
3459 |
match(Set dst (ReplicateI (LoadI mem))); |
|
3460 |
format %{ "pshufd $dst,$mem,0x00\n\t" |
|
3461 |
"vinserti128_high $dst,$dst\t" |
|
3462 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate16I" %} |
|
3463 |
ins_encode %{ |
|
3464 |
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); |
|
3465 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3466 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3467 |
%} |
|
3468 |
ins_pipe( pipe_slow ); |
|
3469 |
%} |
|
3470 |
||
31410 | 3471 |
instruct Repl4I_imm(vecX dst, immI con) %{ |
3472 |
predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); |
|
3473 |
match(Set dst (ReplicateI con)); |
|
3474 |
format %{ "movq $dst,[$constantaddress]\t! replicate4I($con)\n\t" |
|
3475 |
"punpcklqdq $dst,$dst" %} |
|
3476 |
ins_encode %{ |
|
3477 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); |
|
3478 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3479 |
%} |
|
3480 |
ins_pipe( pipe_slow ); |
|
3481 |
%} |
|
3482 |
||
3483 |
instruct Repl8I_imm(vecY dst, immI con) %{ |
|
3484 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); |
|
3485 |
match(Set dst (ReplicateI con)); |
|
3486 |
format %{ "movq $dst,[$constantaddress]\t! replicate8I($con)\n\t" |
|
3487 |
"punpcklqdq $dst,$dst\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3488 |
"vinserti128_high $dst,$dst" %} |
31410 | 3489 |
ins_encode %{ |
3490 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); |
|
3491 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3492 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3493 |
%} |
3494 |
ins_pipe( pipe_slow ); |
|
3495 |
%} |
|
3496 |
||
51857 | 3497 |
instruct Repl16I_imm(legVecZ dst, immI con) %{ |
3498 |
predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl()); |
|
3499 |
match(Set dst (ReplicateI con)); |
|
3500 |
format %{ "movq $dst,[$constantaddress]\t" |
|
3501 |
"punpcklqdq $dst,$dst\n\t" |
|
3502 |
"vinserti128_high $dst,$dst" |
|
3503 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate16I($con)" %} |
|
3504 |
ins_encode %{ |
|
3505 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); |
|
3506 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3507 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3508 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3509 |
%} |
|
3510 |
ins_pipe( pipe_slow ); |
|
3511 |
%} |
|
3512 |
||
31410 | 3513 |
// Long could be loaded into xmm register directly from memory. |
3514 |
instruct Repl2L_mem(vecX dst, memory mem) %{ |
|
3515 |
predicate(n->as_Vector()->length() == 2 && !VM_Version::supports_avx512vlbw()); |
|
3516 |
match(Set dst (ReplicateL (LoadL mem))); |
|
3517 |
format %{ "movq $dst,$mem\n\t" |
|
3518 |
"punpcklqdq $dst,$dst\t! replicate2L" %} |
|
3519 |
ins_encode %{ |
|
3520 |
__ movq($dst$$XMMRegister, $mem$$Address); |
|
3521 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3522 |
%} |
|
3523 |
ins_pipe( pipe_slow ); |
|
3524 |
%} |
|
3525 |
||
3526 |
// Replicate long (8 byte) scalar to be vector |
|
3527 |
#ifdef _LP64 |
|
3528 |
instruct Repl4L(vecY dst, rRegL src) %{ |
|
3529 |
predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); |
|
3530 |
match(Set dst (ReplicateL src)); |
|
3531 |
format %{ "movdq $dst,$src\n\t" |
|
3532 |
"punpcklqdq $dst,$dst\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3533 |
"vinserti128_high $dst,$dst\t! replicate4L" %} |
31410 | 3534 |
ins_encode %{ |
3535 |
__ movdq($dst$$XMMRegister, $src$$Register); |
|
3536 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3537 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3538 |
%} |
3539 |
ins_pipe( pipe_slow ); |
|
3540 |
%} |
|
51857 | 3541 |
|
3542 |
instruct Repl8L(legVecZ dst, rRegL src) %{ |
|
3543 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); |
|
3544 |
match(Set dst (ReplicateL src)); |
|
3545 |
format %{ "movdq $dst,$src\n\t" |
|
3546 |
"punpcklqdq $dst,$dst\n\t" |
|
3547 |
"vinserti128_high $dst,$dst\t" |
|
3548 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8L" %} |
|
3549 |
ins_encode %{ |
|
3550 |
__ movdq($dst$$XMMRegister, $src$$Register); |
|
3551 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3552 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3553 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3554 |
%} |
|
3555 |
ins_pipe( pipe_slow ); |
|
3556 |
%} |
|
31410 | 3557 |
#else // _LP64 |
51857 | 3558 |
instruct Repl4L(vecY dst, eRegL src, vecY tmp) %{ |
31410 | 3559 |
predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); |
3560 |
match(Set dst (ReplicateL src)); |
|
3561 |
effect(TEMP dst, USE src, TEMP tmp); |
|
3562 |
format %{ "movdl $dst,$src.lo\n\t" |
|
3563 |
"movdl $tmp,$src.hi\n\t" |
|
3564 |
"punpckldq $dst,$tmp\n\t" |
|
3565 |
"punpcklqdq $dst,$dst\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3566 |
"vinserti128_high $dst,$dst\t! replicate4L" %} |
31410 | 3567 |
ins_encode %{ |
3568 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3569 |
__ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); |
|
3570 |
__ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); |
|
3571 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3572 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3573 |
%} |
3574 |
ins_pipe( pipe_slow ); |
|
3575 |
%} |
|
51857 | 3576 |
|
3577 |
instruct Repl8L(legVecZ dst, eRegL src, legVecZ tmp) %{ |
|
3578 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); |
|
3579 |
match(Set dst (ReplicateL src)); |
|
3580 |
effect(TEMP dst, USE src, TEMP tmp); |
|
3581 |
format %{ "movdl $dst,$src.lo\n\t" |
|
3582 |
"movdl $tmp,$src.hi\n\t" |
|
3583 |
"punpckldq $dst,$tmp\n\t" |
|
3584 |
"punpcklqdq $dst,$dst\n\t" |
|
3585 |
"vinserti128_high $dst,$dst\t" |
|
3586 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8L" %} |
|
3587 |
ins_encode %{ |
|
3588 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
3589 |
__ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); |
|
3590 |
__ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); |
|
3591 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3592 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3593 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3594 |
%} |
|
3595 |
ins_pipe( pipe_slow ); |
|
3596 |
%} |
|
31410 | 3597 |
#endif // _LP64 |
3598 |
||
3599 |
instruct Repl4L_imm(vecY dst, immL con) %{ |
|
3600 |
predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); |
|
3601 |
match(Set dst (ReplicateL con)); |
|
3602 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
3603 |
"punpcklqdq $dst,$dst\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3604 |
"vinserti128_high $dst,$dst\t! replicate4L($con)" %} |
31410 | 3605 |
ins_encode %{ |
3606 |
__ movq($dst$$XMMRegister, $constantaddress($con)); |
|
3607 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3608 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3609 |
%} |
3610 |
ins_pipe( pipe_slow ); |
|
3611 |
%} |
|
3612 |
||
51857 | 3613 |
instruct Repl8L_imm(legVecZ dst, immL con) %{ |
3614 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); |
|
3615 |
match(Set dst (ReplicateL con)); |
|
3616 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
3617 |
"punpcklqdq $dst,$dst\n\t" |
|
3618 |
"vinserti128_high $dst,$dst\t" |
|
3619 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8L($con)" %} |
|
3620 |
ins_encode %{ |
|
3621 |
__ movq($dst$$XMMRegister, $constantaddress($con)); |
|
3622 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3623 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3624 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3625 |
%} |
|
3626 |
ins_pipe( pipe_slow ); |
|
3627 |
%} |
|
3628 |
||
31410 | 3629 |
instruct Repl4L_mem(vecY dst, memory mem) %{ |
3630 |
predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); |
|
3631 |
match(Set dst (ReplicateL (LoadL mem))); |
|
3632 |
format %{ "movq $dst,$mem\n\t" |
|
3633 |
"punpcklqdq $dst,$dst\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3634 |
"vinserti128_high $dst,$dst\t! replicate4L" %} |
31410 | 3635 |
ins_encode %{ |
3636 |
__ movq($dst$$XMMRegister, $mem$$Address); |
|
3637 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3638 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3639 |
%} |
3640 |
ins_pipe( pipe_slow ); |
|
3641 |
%} |
|
3642 |
||
51857 | 3643 |
instruct Repl8L_mem(legVecZ dst, memory mem) %{ |
3644 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); |
|
3645 |
match(Set dst (ReplicateL (LoadL mem))); |
|
3646 |
format %{ "movq $dst,$mem\n\t" |
|
3647 |
"punpcklqdq $dst,$dst\n\t" |
|
3648 |
"vinserti128_high $dst,$dst\t" |
|
3649 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8L" %} |
|
3650 |
ins_encode %{ |
|
3651 |
__ movq($dst$$XMMRegister, $mem$$Address); |
|
3652 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
3653 |
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3654 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3655 |
%} |
|
3656 |
ins_pipe( pipe_slow ); |
|
3657 |
%} |
|
3658 |
||
31410 | 3659 |
instruct Repl2F_mem(vecD dst, memory mem) %{ |
3660 |
predicate(n->as_Vector()->length() == 2 && UseAVX > 0 && !VM_Version::supports_avx512vl()); |
|
3661 |
match(Set dst (ReplicateF (LoadF mem))); |
|
3662 |
format %{ "pshufd $dst,$mem,0x00\t! replicate2F" %} |
|
3663 |
ins_encode %{ |
|
3664 |
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); |
|
3665 |
%} |
|
3666 |
ins_pipe( pipe_slow ); |
|
3667 |
%} |
|
3668 |
||
3669 |
instruct Repl4F_mem(vecX dst, memory mem) %{ |
|
3670 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vl()); |
|
3671 |
match(Set dst (ReplicateF (LoadF mem))); |
|
3672 |
format %{ "pshufd $dst,$mem,0x00\t! replicate4F" %} |
|
3673 |
ins_encode %{ |
|
3674 |
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); |
|
3675 |
%} |
|
3676 |
ins_pipe( pipe_slow ); |
|
3677 |
%} |
|
3678 |
||
51857 | 3679 |
instruct Repl8F(vecY dst, vlRegF src) %{ |
3680 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 0 && !VM_Version::supports_avx512vl()); |
|
31410 | 3681 |
match(Set dst (ReplicateF src)); |
3682 |
format %{ "pshufd $dst,$src,0x00\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3683 |
"vinsertf128_high $dst,$dst\t! replicate8F" %} |
31410 | 3684 |
ins_encode %{ |
3685 |
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3686 |
__ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3687 |
%} |
3688 |
ins_pipe( pipe_slow ); |
|
3689 |
%} |
|
3690 |
||
3691 |
instruct Repl8F_mem(vecY dst, memory mem) %{ |
|
3692 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); |
|
3693 |
match(Set dst (ReplicateF (LoadF mem))); |
|
3694 |
format %{ "pshufd $dst,$mem,0x00\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3695 |
"vinsertf128_high $dst,$dst\t! replicate8F" %} |
31410 | 3696 |
ins_encode %{ |
3697 |
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3698 |
__ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3699 |
%} |
3700 |
ins_pipe( pipe_slow ); |
|
3701 |
%} |
|
3702 |
||
51857 | 3703 |
instruct Repl16F(legVecZ dst, vlRegF src) %{ |
3704 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 0 && !VM_Version::supports_avx512vl()); |
|
3705 |
match(Set dst (ReplicateF src)); |
|
3706 |
format %{ "pshufd $dst,$src,0x00\n\t" |
|
3707 |
"vinsertf128_high $dst,$dst\t" |
|
3708 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate16F" %} |
|
3709 |
ins_encode %{ |
|
3710 |
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); |
|
3711 |
__ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3712 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3713 |
%} |
|
3714 |
ins_pipe( pipe_slow ); |
|
3715 |
%} |
|
3716 |
||
3717 |
instruct Repl16F_mem(legVecZ dst, memory mem) %{ |
|
3718 |
predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl()); |
|
3719 |
match(Set dst (ReplicateF (LoadF mem))); |
|
3720 |
format %{ "pshufd $dst,$mem,0x00\n\t" |
|
3721 |
"vinsertf128_high $dst,$dst\t" |
|
3722 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate16F" %} |
|
3723 |
ins_encode %{ |
|
3724 |
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); |
|
3725 |
__ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3726 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3727 |
%} |
|
3728 |
ins_pipe( pipe_slow ); |
|
3729 |
%} |
|
3730 |
||
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3731 |
instruct Repl2F_zero(vecD dst, immF0 zero) %{ |
58462
c6f1226cfb72
8221092: UseAVX=3 has performance degredation on Skylake (X7) processors
vdeshpande
parents:
58450
diff
changeset
|
3732 |
predicate(n->as_Vector()->length() == 2); |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3733 |
match(Set dst (ReplicateF zero)); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3734 |
format %{ "xorps $dst,$dst\t! replicate2F zero" %} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3735 |
ins_encode %{ |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3736 |
__ xorps($dst$$XMMRegister, $dst$$XMMRegister); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3737 |
%} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3738 |
ins_pipe( fpu_reg_reg ); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3739 |
%} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3740 |
|
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3741 |
instruct Repl4F_zero(vecX dst, immF0 zero) %{ |
58462
c6f1226cfb72
8221092: UseAVX=3 has performance degredation on Skylake (X7) processors
vdeshpande
parents:
58450
diff
changeset
|
3742 |
predicate(n->as_Vector()->length() == 4); |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3743 |
match(Set dst (ReplicateF zero)); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3744 |
format %{ "xorps $dst,$dst\t! replicate4F zero" %} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3745 |
ins_encode %{ |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3746 |
__ xorps($dst$$XMMRegister, $dst$$XMMRegister); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3747 |
%} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3748 |
ins_pipe( fpu_reg_reg ); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3749 |
%} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3750 |
|
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3751 |
instruct Repl8F_zero(vecY dst, immF0 zero) %{ |
58462
c6f1226cfb72
8221092: UseAVX=3 has performance degredation on Skylake (X7) processors
vdeshpande
parents:
58450
diff
changeset
|
3752 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 0); |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3753 |
match(Set dst (ReplicateF zero)); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3754 |
format %{ "vxorps $dst,$dst,$dst\t! replicate8F zero" %} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3755 |
ins_encode %{ |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3756 |
int vector_len = 1; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3757 |
__ vxorps($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3758 |
%} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3759 |
ins_pipe( fpu_reg_reg ); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3760 |
%} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3761 |
|
31410 | 3762 |
instruct Repl2D_mem(vecX dst, memory mem) %{ |
3763 |
predicate(n->as_Vector()->length() == 2 && UseAVX > 0 && !VM_Version::supports_avx512vl()); |
|
3764 |
match(Set dst (ReplicateD (LoadD mem))); |
|
3765 |
format %{ "pshufd $dst,$mem,0x44\t! replicate2D" %} |
|
3766 |
ins_encode %{ |
|
3767 |
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x44); |
|
3768 |
%} |
|
3769 |
ins_pipe( pipe_slow ); |
|
3770 |
%} |
|
3771 |
||
51857 | 3772 |
instruct Repl4D(vecY dst, vlRegD src) %{ |
3773 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vl()); |
|
31410 | 3774 |
match(Set dst (ReplicateD src)); |
3775 |
format %{ "pshufd $dst,$src,0x44\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3776 |
"vinsertf128_high $dst,$dst\t! replicate4D" %} |
31410 | 3777 |
ins_encode %{ |
3778 |
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3779 |
__ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3780 |
%} |
3781 |
ins_pipe( pipe_slow ); |
|
3782 |
%} |
|
3783 |
||
3784 |
instruct Repl4D_mem(vecY dst, memory mem) %{ |
|
3785 |
predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); |
|
3786 |
match(Set dst (ReplicateD (LoadD mem))); |
|
3787 |
format %{ "pshufd $dst,$mem,0x44\n\t" |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3788 |
"vinsertf128_high $dst,$dst\t! replicate4D" %} |
31410 | 3789 |
ins_encode %{ |
3790 |
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x44); |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
35581
diff
changeset
|
3791 |
__ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); |
31410 | 3792 |
%} |
3793 |
ins_pipe( pipe_slow ); |
|
3794 |
%} |
|
3795 |
||
51857 | 3796 |
instruct Repl8D(legVecZ dst, vlRegD src) %{ |
3797 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 0 && !VM_Version::supports_avx512vl()); |
|
3798 |
match(Set dst (ReplicateD src)); |
|
3799 |
format %{ "pshufd $dst,$src,0x44\n\t" |
|
3800 |
"vinsertf128_high $dst,$dst\t" |
|
3801 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8D" %} |
|
3802 |
ins_encode %{ |
|
3803 |
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); |
|
3804 |
__ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3805 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3806 |
%} |
|
3807 |
ins_pipe( pipe_slow ); |
|
3808 |
%} |
|
3809 |
||
3810 |
instruct Repl8D_mem(legVecZ dst, memory mem) %{ |
|
3811 |
predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); |
|
3812 |
match(Set dst (ReplicateD (LoadD mem))); |
|
3813 |
format %{ "pshufd $dst,$mem,0x44\n\t" |
|
3814 |
"vinsertf128_high $dst,$dst\t" |
|
3815 |
"vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8D" %} |
|
3816 |
ins_encode %{ |
|
3817 |
__ pshufd($dst$$XMMRegister, $mem$$Address, 0x44); |
|
3818 |
__ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); |
|
3819 |
__ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); |
|
3820 |
%} |
|
3821 |
ins_pipe( pipe_slow ); |
|
3822 |
%} |
|
3823 |
||
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3824 |
// Replicate double (8 byte) scalar zero to be vector |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3825 |
instruct Repl2D_zero(vecX dst, immD0 zero) %{ |
58462
c6f1226cfb72
8221092: UseAVX=3 has performance degredation on Skylake (X7) processors
vdeshpande
parents:
58450
diff
changeset
|
3826 |
predicate(n->as_Vector()->length() == 2); |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3827 |
match(Set dst (ReplicateD zero)); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3828 |
format %{ "xorpd $dst,$dst\t! replicate2D zero" %} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3829 |
ins_encode %{ |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3830 |
__ xorpd($dst$$XMMRegister, $dst$$XMMRegister); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3831 |
%} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3832 |
ins_pipe( fpu_reg_reg ); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3833 |
%} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3834 |
|
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3835 |
instruct Repl4D_zero(vecY dst, immD0 zero) %{ |
58462
c6f1226cfb72
8221092: UseAVX=3 has performance degredation on Skylake (X7) processors
vdeshpande
parents:
58450
diff
changeset
|
3836 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 0); |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3837 |
match(Set dst (ReplicateD zero)); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3838 |
format %{ "vxorpd $dst,$dst,$dst,vect256\t! replicate4D zero" %} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3839 |
ins_encode %{ |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3840 |
int vector_len = 1; |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3841 |
__ vxorpd($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3842 |
%} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3843 |
ins_pipe( fpu_reg_reg ); |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3844 |
%} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
3845 |
|
31410 | 3846 |
// ====================GENERIC REPLICATE========================================== |
3847 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3848 |
// Replicate byte scalar to be vector |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3849 |
instruct Repl4B(vecS dst, rRegI src) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3850 |
predicate(n->as_Vector()->length() == 4); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3851 |
match(Set dst (ReplicateB src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3852 |
format %{ "movd $dst,$src\n\t" |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3853 |
"punpcklbw $dst,$dst\n\t" |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3854 |
"pshuflw $dst,$dst,0x00\t! replicate4B" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3855 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3856 |
__ movdl($dst$$XMMRegister, $src$$Register); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3857 |
__ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3858 |
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3859 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3860 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3861 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3862 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3863 |
instruct Repl8B(vecD dst, rRegI src) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3864 |
predicate(n->as_Vector()->length() == 8); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3865 |
match(Set dst (ReplicateB src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3866 |
format %{ "movd $dst,$src\n\t" |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3867 |
"punpcklbw $dst,$dst\n\t" |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3868 |
"pshuflw $dst,$dst,0x00\t! replicate8B" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3869 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3870 |
__ movdl($dst$$XMMRegister, $src$$Register); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3871 |
__ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3872 |
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3873 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3874 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3875 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3876 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3877 |
// Replicate byte scalar immediate to be vector by loading from const table. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3878 |
instruct Repl4B_imm(vecS dst, immI con) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3879 |
predicate(n->as_Vector()->length() == 4); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3880 |
match(Set dst (ReplicateB con)); |
13294 | 3881 |
format %{ "movdl $dst,[$constantaddress]\t! replicate4B($con)" %} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3882 |
ins_encode %{ |
13294 | 3883 |
__ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 1))); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3884 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3885 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3886 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3887 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3888 |
instruct Repl8B_imm(vecD dst, immI con) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3889 |
predicate(n->as_Vector()->length() == 8); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3890 |
match(Set dst (ReplicateB con)); |
13294 | 3891 |
format %{ "movq $dst,[$constantaddress]\t! replicate8B($con)" %} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3892 |
ins_encode %{ |
13294 | 3893 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3894 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3895 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3896 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3897 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3898 |
// Replicate byte scalar zero to be vector |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3899 |
instruct Repl4B_zero(vecS dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3900 |
predicate(n->as_Vector()->length() == 4); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3901 |
match(Set dst (ReplicateB zero)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3902 |
format %{ "pxor $dst,$dst\t! replicate4B zero" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3903 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3904 |
__ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3905 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3906 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3907 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3908 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3909 |
instruct Repl8B_zero(vecD dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3910 |
predicate(n->as_Vector()->length() == 8); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3911 |
match(Set dst (ReplicateB zero)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3912 |
format %{ "pxor $dst,$dst\t! replicate8B zero" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3913 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3914 |
__ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3915 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3916 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3917 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3918 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3919 |
instruct Repl16B_zero(vecX dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3920 |
predicate(n->as_Vector()->length() == 16); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3921 |
match(Set dst (ReplicateB zero)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3922 |
format %{ "pxor $dst,$dst\t! replicate16B zero" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3923 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3924 |
__ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3925 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3926 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3927 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3928 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3929 |
instruct Repl32B_zero(vecY dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3930 |
predicate(n->as_Vector()->length() == 32); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3931 |
match(Set dst (ReplicateB zero)); |
13294 | 3932 |
format %{ "vpxor $dst,$dst,$dst\t! replicate32B zero" %} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3933 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3934 |
// Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). |
30624 | 3935 |
int vector_len = 1; |
3936 |
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
3937 |
%} |
|
3938 |
ins_pipe( fpu_reg_reg ); |
|
3939 |
%} |
|
3940 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3941 |
// Replicate char/short (2 byte) scalar to be vector |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3942 |
instruct Repl2S(vecS dst, rRegI src) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3943 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3944 |
match(Set dst (ReplicateS src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3945 |
format %{ "movd $dst,$src\n\t" |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3946 |
"pshuflw $dst,$dst,0x00\t! replicate2S" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3947 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3948 |
__ movdl($dst$$XMMRegister, $src$$Register); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3949 |
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3950 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3951 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3952 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3953 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3954 |
// Replicate char/short (2 byte) scalar immediate to be vector by loading from const table. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3955 |
instruct Repl2S_imm(vecS dst, immI con) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3956 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3957 |
match(Set dst (ReplicateS con)); |
13294 | 3958 |
format %{ "movdl $dst,[$constantaddress]\t! replicate2S($con)" %} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3959 |
ins_encode %{ |
13294 | 3960 |
__ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 2))); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3961 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3962 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3963 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3964 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3965 |
instruct Repl4S_imm(vecD dst, immI con) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3966 |
predicate(n->as_Vector()->length() == 4); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3967 |
match(Set dst (ReplicateS con)); |
13294 | 3968 |
format %{ "movq $dst,[$constantaddress]\t! replicate4S($con)" %} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3969 |
ins_encode %{ |
13294 | 3970 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3971 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3972 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3973 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3974 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3975 |
// Replicate char/short (2 byte) scalar zero to be vector |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3976 |
instruct Repl2S_zero(vecS dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3977 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3978 |
match(Set dst (ReplicateS zero)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3979 |
format %{ "pxor $dst,$dst\t! replicate2S zero" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3980 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3981 |
__ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3982 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3983 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3984 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3985 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3986 |
instruct Repl4S_zero(vecD dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3987 |
predicate(n->as_Vector()->length() == 4); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3988 |
match(Set dst (ReplicateS zero)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3989 |
format %{ "pxor $dst,$dst\t! replicate4S zero" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3990 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3991 |
__ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3992 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3993 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3994 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3995 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3996 |
instruct Repl8S_zero(vecX dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3997 |
predicate(n->as_Vector()->length() == 8); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3998 |
match(Set dst (ReplicateS zero)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
3999 |
format %{ "pxor $dst,$dst\t! replicate8S zero" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4000 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4001 |
__ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4002 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4003 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4004 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4005 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4006 |
instruct Repl16S_zero(vecY dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4007 |
predicate(n->as_Vector()->length() == 16); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4008 |
match(Set dst (ReplicateS zero)); |
13294 | 4009 |
format %{ "vpxor $dst,$dst,$dst\t! replicate16S zero" %} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4010 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4011 |
// Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). |
30624 | 4012 |
int vector_len = 1; |
4013 |
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
4014 |
%} |
|
4015 |
ins_pipe( fpu_reg_reg ); |
|
4016 |
%} |
|
4017 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4018 |
// Replicate integer (4 byte) scalar to be vector |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4019 |
instruct Repl2I(vecD dst, rRegI src) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4020 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4021 |
match(Set dst (ReplicateI src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4022 |
format %{ "movd $dst,$src\n\t" |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4023 |
"pshufd $dst,$dst,0x00\t! replicate2I" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4024 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4025 |
__ movdl($dst$$XMMRegister, $src$$Register); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4026 |
__ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4027 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4028 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4029 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4030 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4031 |
// Integer could be loaded into xmm register directly from memory. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4032 |
instruct Repl2I_mem(vecD dst, memory mem) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4033 |
predicate(n->as_Vector()->length() == 2); |
13294 | 4034 |
match(Set dst (ReplicateI (LoadI mem))); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4035 |
format %{ "movd $dst,$mem\n\t" |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4036 |
"pshufd $dst,$dst,0x00\t! replicate2I" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4037 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4038 |
__ movdl($dst$$XMMRegister, $mem$$Address); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4039 |
__ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4040 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4041 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4042 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4043 |
|
31410 | 4044 |
// Replicate integer (4 byte) scalar immediate to be vector by loading from const table. |
4045 |
instruct Repl2I_imm(vecD dst, immI con) %{ |
|
4046 |
predicate(n->as_Vector()->length() == 2); |
|
4047 |
match(Set dst (ReplicateI con)); |
|
4048 |
format %{ "movq $dst,[$constantaddress]\t! replicate2I($con)" %} |
|
4049 |
ins_encode %{ |
|
4050 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); |
|
4051 |
%} |
|
4052 |
ins_pipe( fpu_reg_reg ); |
|
30624 | 4053 |
%} |
4054 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4055 |
// Replicate integer (4 byte) scalar zero to be vector |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4056 |
instruct Repl2I_zero(vecD dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4057 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4058 |
match(Set dst (ReplicateI zero)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4059 |
format %{ "pxor $dst,$dst\t! replicate2I" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4060 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4061 |
__ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4062 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4063 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4064 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4065 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4066 |
instruct Repl4I_zero(vecX dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4067 |
predicate(n->as_Vector()->length() == 4); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4068 |
match(Set dst (ReplicateI zero)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4069 |
format %{ "pxor $dst,$dst\t! replicate4I zero)" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4070 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4071 |
__ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4072 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4073 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4074 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4075 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4076 |
instruct Repl8I_zero(vecY dst, immI0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4077 |
predicate(n->as_Vector()->length() == 8); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4078 |
match(Set dst (ReplicateI zero)); |
13294 | 4079 |
format %{ "vpxor $dst,$dst,$dst\t! replicate8I zero" %} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4080 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4081 |
// Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). |
30624 | 4082 |
int vector_len = 1; |
4083 |
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
4084 |
%} |
|
4085 |
ins_pipe( fpu_reg_reg ); |
|
4086 |
%} |
|
4087 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4088 |
// Replicate long (8 byte) scalar to be vector |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4089 |
#ifdef _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4090 |
instruct Repl2L(vecX dst, rRegL src) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4091 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4092 |
match(Set dst (ReplicateL src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4093 |
format %{ "movdq $dst,$src\n\t" |
13294 | 4094 |
"punpcklqdq $dst,$dst\t! replicate2L" %} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4095 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4096 |
__ movdq($dst$$XMMRegister, $src$$Register); |
13294 | 4097 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4098 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4099 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4100 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4101 |
#else // _LP64 |
51857 | 4102 |
instruct Repl2L(vecX dst, eRegL src, vecX tmp) %{ |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4103 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4104 |
match(Set dst (ReplicateL src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4105 |
effect(TEMP dst, USE src, TEMP tmp); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4106 |
format %{ "movdl $dst,$src.lo\n\t" |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4107 |
"movdl $tmp,$src.hi\n\t" |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4108 |
"punpckldq $dst,$tmp\n\t" |
13294 | 4109 |
"punpcklqdq $dst,$dst\t! replicate2L"%} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4110 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4111 |
__ movdl($dst$$XMMRegister, $src$$Register); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4112 |
__ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4113 |
__ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); |
13294 | 4114 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4115 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4116 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4117 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4118 |
#endif // _LP64 |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4119 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4120 |
// Replicate long (8 byte) scalar immediate to be vector by loading from const table. |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4121 |
instruct Repl2L_imm(vecX dst, immL con) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4122 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4123 |
match(Set dst (ReplicateL con)); |
13294 | 4124 |
format %{ "movq $dst,[$constantaddress]\n\t" |
4125 |
"punpcklqdq $dst,$dst\t! replicate2L($con)" %} |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4126 |
ins_encode %{ |
13294 | 4127 |
__ movq($dst$$XMMRegister, $constantaddress($con)); |
4128 |
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); |
|
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4129 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4130 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4131 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4132 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4133 |
// Replicate long (8 byte) scalar zero to be vector |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4134 |
instruct Repl2L_zero(vecX dst, immL0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4135 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4136 |
match(Set dst (ReplicateL zero)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4137 |
format %{ "pxor $dst,$dst\t! replicate2L zero" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4138 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4139 |
__ pxor($dst$$XMMRegister, $dst$$XMMRegister); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4140 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4141 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4142 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4143 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4144 |
instruct Repl4L_zero(vecY dst, immL0 zero) %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4145 |
predicate(n->as_Vector()->length() == 4); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4146 |
match(Set dst (ReplicateL zero)); |
13294 | 4147 |
format %{ "vpxor $dst,$dst,$dst\t! replicate4L zero" %} |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4148 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4149 |
// Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). |
30624 | 4150 |
int vector_len = 1; |
4151 |
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
4152 |
%} |
|
4153 |
ins_pipe( fpu_reg_reg ); |
|
4154 |
%} |
|
4155 |
||
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4156 |
// Replicate float (4 byte) scalar to be vector |
51857 | 4157 |
instruct Repl2F(vecD dst, vlRegF src) %{ |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4158 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4159 |
match(Set dst (ReplicateF src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4160 |
format %{ "pshufd $dst,$dst,0x00\t! replicate2F" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4161 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4162 |
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4163 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4164 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4165 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4166 |
|
51857 | 4167 |
instruct Repl4F(vecX dst, vlRegF src) %{ |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4168 |
predicate(n->as_Vector()->length() == 4); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4169 |
match(Set dst (ReplicateF src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4170 |
format %{ "pshufd $dst,$dst,0x00\t! replicate4F" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4171 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4172 |
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4173 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4174 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4175 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4176 |
|
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4177 |
// Replicate double (8 bytes) scalar to be vector |
51857 | 4178 |
instruct Repl2D(vecX dst, vlRegD src) %{ |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4179 |
predicate(n->as_Vector()->length() == 2); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4180 |
match(Set dst (ReplicateD src)); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4181 |
format %{ "pshufd $dst,$src,0x44\t! replicate2D" %} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4182 |
ins_encode %{ |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4183 |
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4184 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4185 |
ins_pipe( pipe_slow ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4186 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4187 |
|
31410 | 4188 |
// ====================EVEX REPLICATE============================================= |
4189 |
||
4190 |
instruct Repl4B_mem_evex(vecS dst, memory mem) %{ |
|
51857 | 4191 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4192 |
match(Set dst (ReplicateB (LoadB mem))); |
4193 |
format %{ "vpbroadcastb $dst,$mem\t! replicate4B" %} |
|
4194 |
ins_encode %{ |
|
4195 |
int vector_len = 0; |
|
51857 | 4196 |
__ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4197 |
%} |
4198 |
ins_pipe( pipe_slow ); |
|
4199 |
%} |
|
4200 |
||
4201 |
instruct Repl8B_mem_evex(vecD dst, memory mem) %{ |
|
51857 | 4202 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4203 |
match(Set dst (ReplicateB (LoadB mem))); |
4204 |
format %{ "vpbroadcastb $dst,$mem\t! replicate8B" %} |
|
4205 |
ins_encode %{ |
|
4206 |
int vector_len = 0; |
|
51857 | 4207 |
__ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4208 |
%} |
4209 |
ins_pipe( pipe_slow ); |
|
4210 |
%} |
|
4211 |
||
4212 |
instruct Repl16B_evex(vecX dst, rRegI src) %{ |
|
51857 | 4213 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4214 |
match(Set dst (ReplicateB src)); |
51857 | 4215 |
format %{ "evpbroadcastb $dst,$src\t! replicate16B" %} |
31410 | 4216 |
ins_encode %{ |
4217 |
int vector_len = 0; |
|
4218 |
__ evpbroadcastb($dst$$XMMRegister, $src$$Register, vector_len); |
|
4219 |
%} |
|
4220 |
ins_pipe( pipe_slow ); |
|
4221 |
%} |
|
4222 |
||
4223 |
instruct Repl16B_mem_evex(vecX dst, memory mem) %{ |
|
51857 | 4224 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4225 |
match(Set dst (ReplicateB (LoadB mem))); |
4226 |
format %{ "vpbroadcastb $dst,$mem\t! replicate16B" %} |
|
4227 |
ins_encode %{ |
|
4228 |
int vector_len = 0; |
|
51857 | 4229 |
__ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4230 |
%} |
4231 |
ins_pipe( pipe_slow ); |
|
4232 |
%} |
|
4233 |
||
4234 |
instruct Repl32B_evex(vecY dst, rRegI src) %{ |
|
51857 | 4235 |
predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4236 |
match(Set dst (ReplicateB src)); |
51857 | 4237 |
format %{ "evpbroadcastb $dst,$src\t! replicate32B" %} |
31410 | 4238 |
ins_encode %{ |
4239 |
int vector_len = 1; |
|
4240 |
__ evpbroadcastb($dst$$XMMRegister, $src$$Register, vector_len); |
|
4241 |
%} |
|
4242 |
ins_pipe( pipe_slow ); |
|
4243 |
%} |
|
4244 |
||
4245 |
instruct Repl32B_mem_evex(vecY dst, memory mem) %{ |
|
51857 | 4246 |
predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4247 |
match(Set dst (ReplicateB (LoadB mem))); |
4248 |
format %{ "vpbroadcastb $dst,$mem\t! replicate32B" %} |
|
4249 |
ins_encode %{ |
|
4250 |
int vector_len = 1; |
|
51857 | 4251 |
__ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4252 |
%} |
4253 |
ins_pipe( pipe_slow ); |
|
4254 |
%} |
|
4255 |
||
4256 |
instruct Repl64B_evex(vecZ dst, rRegI src) %{ |
|
51857 | 4257 |
predicate(n->as_Vector()->length() == 64 && UseAVX > 2 && VM_Version::supports_avx512bw()); |
31410 | 4258 |
match(Set dst (ReplicateB src)); |
51857 | 4259 |
format %{ "evpbroadcastb $dst,$src\t! upper replicate64B" %} |
31410 | 4260 |
ins_encode %{ |
4261 |
int vector_len = 2; |
|
4262 |
__ evpbroadcastb($dst$$XMMRegister, $src$$Register, vector_len); |
|
4263 |
%} |
|
4264 |
ins_pipe( pipe_slow ); |
|
4265 |
%} |
|
4266 |
||
4267 |
instruct Repl64B_mem_evex(vecZ dst, memory mem) %{ |
|
51857 | 4268 |
predicate(n->as_Vector()->length() == 64 && UseAVX > 2 && VM_Version::supports_avx512bw()); |
31410 | 4269 |
match(Set dst (ReplicateB (LoadB mem))); |
4270 |
format %{ "vpbroadcastb $dst,$mem\t! replicate64B" %} |
|
4271 |
ins_encode %{ |
|
4272 |
int vector_len = 2; |
|
51857 | 4273 |
__ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4274 |
%} |
4275 |
ins_pipe( pipe_slow ); |
|
4276 |
%} |
|
4277 |
||
4278 |
instruct Repl16B_imm_evex(vecX dst, immI con) %{ |
|
51857 | 4279 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4280 |
match(Set dst (ReplicateB con)); |
4281 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
4282 |
"vpbroadcastb $dst,$dst\t! replicate16B" %} |
|
4283 |
ins_encode %{ |
|
4284 |
int vector_len = 0; |
|
4285 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); |
|
51857 | 4286 |
__ vpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4287 |
%} |
4288 |
ins_pipe( pipe_slow ); |
|
4289 |
%} |
|
4290 |
||
4291 |
instruct Repl32B_imm_evex(vecY dst, immI con) %{ |
|
51857 | 4292 |
predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4293 |
match(Set dst (ReplicateB con)); |
4294 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
4295 |
"vpbroadcastb $dst,$dst\t! replicate32B" %} |
|
4296 |
ins_encode %{ |
|
4297 |
int vector_len = 1; |
|
4298 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); |
|
51857 | 4299 |
__ vpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4300 |
%} |
4301 |
ins_pipe( pipe_slow ); |
|
4302 |
%} |
|
4303 |
||
4304 |
instruct Repl64B_imm_evex(vecZ dst, immI con) %{ |
|
51857 | 4305 |
predicate(n->as_Vector()->length() == 64 && UseAVX > 2 && VM_Version::supports_avx512bw()); |
31410 | 4306 |
match(Set dst (ReplicateB con)); |
4307 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
4308 |
"vpbroadcastb $dst,$dst\t! upper replicate64B" %} |
|
4309 |
ins_encode %{ |
|
4310 |
int vector_len = 2; |
|
4311 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); |
|
51857 | 4312 |
__ vpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4313 |
%} |
4314 |
ins_pipe( pipe_slow ); |
|
4315 |
%} |
|
4316 |
||
4317 |
instruct Repl64B_zero_evex(vecZ dst, immI0 zero) %{ |
|
4318 |
predicate(n->as_Vector()->length() == 64 && UseAVX > 2); |
|
4319 |
match(Set dst (ReplicateB zero)); |
|
4320 |
format %{ "vpxor $dst k0,$dst,$dst\t! replicate64B zero" %} |
|
4321 |
ins_encode %{ |
|
4322 |
// Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it). |
|
4323 |
int vector_len = 2; |
|
4324 |
__ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
4325 |
%} |
|
4326 |
ins_pipe( fpu_reg_reg ); |
|
4327 |
%} |
|
4328 |
||
4329 |
instruct Repl4S_evex(vecD dst, rRegI src) %{ |
|
51857 | 4330 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4331 |
match(Set dst (ReplicateS src)); |
51857 | 4332 |
format %{ "evpbroadcastw $dst,$src\t! replicate4S" %} |
31410 | 4333 |
ins_encode %{ |
4334 |
int vector_len = 0; |
|
4335 |
__ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len); |
|
4336 |
%} |
|
4337 |
ins_pipe( pipe_slow ); |
|
4338 |
%} |
|
4339 |
||
4340 |
instruct Repl4S_mem_evex(vecD dst, memory mem) %{ |
|
51857 | 4341 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4342 |
match(Set dst (ReplicateS (LoadS mem))); |
4343 |
format %{ "vpbroadcastw $dst,$mem\t! replicate4S" %} |
|
4344 |
ins_encode %{ |
|
4345 |
int vector_len = 0; |
|
51857 | 4346 |
__ vpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4347 |
%} |
4348 |
ins_pipe( pipe_slow ); |
|
4349 |
%} |
|
4350 |
||
4351 |
instruct Repl8S_evex(vecX dst, rRegI src) %{ |
|
51857 | 4352 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4353 |
match(Set dst (ReplicateS src)); |
51857 | 4354 |
format %{ "evpbroadcastw $dst,$src\t! replicate8S" %} |
31410 | 4355 |
ins_encode %{ |
4356 |
int vector_len = 0; |
|
4357 |
__ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len); |
|
4358 |
%} |
|
4359 |
ins_pipe( pipe_slow ); |
|
4360 |
%} |
|
4361 |
||
4362 |
instruct Repl8S_mem_evex(vecX dst, memory mem) %{ |
|
51857 | 4363 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4364 |
match(Set dst (ReplicateS (LoadS mem))); |
4365 |
format %{ "vpbroadcastw $dst,$mem\t! replicate8S" %} |
|
4366 |
ins_encode %{ |
|
4367 |
int vector_len = 0; |
|
51857 | 4368 |
__ vpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4369 |
%} |
4370 |
ins_pipe( pipe_slow ); |
|
4371 |
%} |
|
4372 |
||
4373 |
instruct Repl16S_evex(vecY dst, rRegI src) %{ |
|
51857 | 4374 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4375 |
match(Set dst (ReplicateS src)); |
51857 | 4376 |
format %{ "evpbroadcastw $dst,$src\t! replicate16S" %} |
31410 | 4377 |
ins_encode %{ |
4378 |
int vector_len = 1; |
|
4379 |
__ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len); |
|
4380 |
%} |
|
4381 |
ins_pipe( pipe_slow ); |
|
4382 |
%} |
|
4383 |
||
4384 |
instruct Repl16S_mem_evex(vecY dst, memory mem) %{ |
|
51857 | 4385 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4386 |
match(Set dst (ReplicateS (LoadS mem))); |
4387 |
format %{ "vpbroadcastw $dst,$mem\t! replicate16S" %} |
|
4388 |
ins_encode %{ |
|
4389 |
int vector_len = 1; |
|
51857 | 4390 |
__ vpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4391 |
%} |
4392 |
ins_pipe( pipe_slow ); |
|
4393 |
%} |
|
4394 |
||
4395 |
instruct Repl32S_evex(vecZ dst, rRegI src) %{ |
|
51857 | 4396 |
predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512bw()); |
31410 | 4397 |
match(Set dst (ReplicateS src)); |
51857 | 4398 |
format %{ "evpbroadcastw $dst,$src\t! replicate32S" %} |
31410 | 4399 |
ins_encode %{ |
4400 |
int vector_len = 2; |
|
4401 |
__ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len); |
|
4402 |
%} |
|
4403 |
ins_pipe( pipe_slow ); |
|
4404 |
%} |
|
4405 |
||
4406 |
instruct Repl32S_mem_evex(vecZ dst, memory mem) %{ |
|
51857 | 4407 |
predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512bw()); |
31410 | 4408 |
match(Set dst (ReplicateS (LoadS mem))); |
4409 |
format %{ "vpbroadcastw $dst,$mem\t! replicate32S" %} |
|
4410 |
ins_encode %{ |
|
4411 |
int vector_len = 2; |
|
51857 | 4412 |
__ vpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4413 |
%} |
4414 |
ins_pipe( pipe_slow ); |
|
4415 |
%} |
|
4416 |
||
4417 |
instruct Repl8S_imm_evex(vecX dst, immI con) %{ |
|
51857 | 4418 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4419 |
match(Set dst (ReplicateS con)); |
4420 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
4421 |
"vpbroadcastw $dst,$dst\t! replicate8S" %} |
|
4422 |
ins_encode %{ |
|
4423 |
int vector_len = 0; |
|
4424 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); |
|
51857 | 4425 |
__ vpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4426 |
%} |
4427 |
ins_pipe( pipe_slow ); |
|
4428 |
%} |
|
4429 |
||
4430 |
instruct Repl16S_imm_evex(vecY dst, immI con) %{ |
|
51857 | 4431 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); |
31410 | 4432 |
match(Set dst (ReplicateS con)); |
4433 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
4434 |
"vpbroadcastw $dst,$dst\t! replicate16S" %} |
|
4435 |
ins_encode %{ |
|
4436 |
int vector_len = 1; |
|
4437 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); |
|
51857 | 4438 |
__ vpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4439 |
%} |
4440 |
ins_pipe( pipe_slow ); |
|
4441 |
%} |
|
4442 |
||
4443 |
instruct Repl32S_imm_evex(vecZ dst, immI con) %{ |
|
51857 | 4444 |
predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512bw()); |
31410 | 4445 |
match(Set dst (ReplicateS con)); |
4446 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
4447 |
"vpbroadcastw $dst,$dst\t! replicate32S" %} |
|
4448 |
ins_encode %{ |
|
4449 |
int vector_len = 2; |
|
4450 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); |
|
51857 | 4451 |
__ vpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4452 |
%} |
4453 |
ins_pipe( pipe_slow ); |
|
4454 |
%} |
|
4455 |
||
4456 |
instruct Repl32S_zero_evex(vecZ dst, immI0 zero) %{ |
|
4457 |
predicate(n->as_Vector()->length() == 32 && UseAVX > 2); |
|
4458 |
match(Set dst (ReplicateS zero)); |
|
4459 |
format %{ "vpxor $dst k0,$dst,$dst\t! replicate32S zero" %} |
|
4460 |
ins_encode %{ |
|
4461 |
// Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it). |
|
4462 |
int vector_len = 2; |
|
4463 |
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
4464 |
%} |
|
4465 |
ins_pipe( fpu_reg_reg ); |
|
4466 |
%} |
|
4467 |
||
4468 |
instruct Repl4I_evex(vecX dst, rRegI src) %{ |
|
51857 | 4469 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4470 |
match(Set dst (ReplicateI src)); |
51857 | 4471 |
format %{ "evpbroadcastd $dst,$src\t! replicate4I" %} |
31410 | 4472 |
ins_encode %{ |
4473 |
int vector_len = 0; |
|
4474 |
__ evpbroadcastd($dst$$XMMRegister, $src$$Register, vector_len); |
|
4475 |
%} |
|
4476 |
ins_pipe( pipe_slow ); |
|
4477 |
%} |
|
4478 |
||
4479 |
instruct Repl4I_mem_evex(vecX dst, memory mem) %{ |
|
51857 | 4480 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4481 |
match(Set dst (ReplicateI (LoadI mem))); |
4482 |
format %{ "vpbroadcastd $dst,$mem\t! replicate4I" %} |
|
4483 |
ins_encode %{ |
|
4484 |
int vector_len = 0; |
|
51857 | 4485 |
__ vpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4486 |
%} |
4487 |
ins_pipe( pipe_slow ); |
|
4488 |
%} |
|
4489 |
||
4490 |
instruct Repl8I_evex(vecY dst, rRegI src) %{ |
|
51857 | 4491 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4492 |
match(Set dst (ReplicateI src)); |
51857 | 4493 |
format %{ "evpbroadcastd $dst,$src\t! replicate8I" %} |
31410 | 4494 |
ins_encode %{ |
4495 |
int vector_len = 1; |
|
4496 |
__ evpbroadcastd($dst$$XMMRegister, $src$$Register, vector_len); |
|
4497 |
%} |
|
4498 |
ins_pipe( pipe_slow ); |
|
4499 |
%} |
|
4500 |
||
4501 |
instruct Repl8I_mem_evex(vecY dst, memory mem) %{ |
|
51857 | 4502 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4503 |
match(Set dst (ReplicateI (LoadI mem))); |
4504 |
format %{ "vpbroadcastd $dst,$mem\t! replicate8I" %} |
|
4505 |
ins_encode %{ |
|
4506 |
int vector_len = 1; |
|
51857 | 4507 |
__ vpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4508 |
%} |
4509 |
ins_pipe( pipe_slow ); |
|
4510 |
%} |
|
4511 |
||
4512 |
instruct Repl16I_evex(vecZ dst, rRegI src) %{ |
|
4513 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2); |
|
4514 |
match(Set dst (ReplicateI src)); |
|
51857 | 4515 |
format %{ "evpbroadcastd $dst,$src\t! replicate16I" %} |
31410 | 4516 |
ins_encode %{ |
4517 |
int vector_len = 2; |
|
4518 |
__ evpbroadcastd($dst$$XMMRegister, $src$$Register, vector_len); |
|
4519 |
%} |
|
4520 |
ins_pipe( pipe_slow ); |
|
4521 |
%} |
|
4522 |
||
4523 |
instruct Repl16I_mem_evex(vecZ dst, memory mem) %{ |
|
4524 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2); |
|
4525 |
match(Set dst (ReplicateI (LoadI mem))); |
|
4526 |
format %{ "vpbroadcastd $dst,$mem\t! replicate16I" %} |
|
4527 |
ins_encode %{ |
|
4528 |
int vector_len = 2; |
|
51857 | 4529 |
__ vpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4530 |
%} |
4531 |
ins_pipe( pipe_slow ); |
|
4532 |
%} |
|
4533 |
||
4534 |
instruct Repl4I_imm_evex(vecX dst, immI con) %{ |
|
51857 | 4535 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4536 |
match(Set dst (ReplicateI con)); |
4537 |
format %{ "movq $dst,[$constantaddress]\t! replicate8I($con)\n\t" |
|
4538 |
"vpbroadcastd $dst,$dst\t! replicate4I" %} |
|
4539 |
ins_encode %{ |
|
4540 |
int vector_len = 0; |
|
4541 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); |
|
51857 | 4542 |
__ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4543 |
%} |
4544 |
ins_pipe( pipe_slow ); |
|
4545 |
%} |
|
4546 |
||
4547 |
instruct Repl8I_imm_evex(vecY dst, immI con) %{ |
|
51857 | 4548 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4549 |
match(Set dst (ReplicateI con)); |
4550 |
format %{ "movq $dst,[$constantaddress]\t! replicate8I($con)\n\t" |
|
4551 |
"vpbroadcastd $dst,$dst\t! replicate8I" %} |
|
4552 |
ins_encode %{ |
|
4553 |
int vector_len = 1; |
|
4554 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); |
|
51857 | 4555 |
__ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4556 |
%} |
4557 |
ins_pipe( pipe_slow ); |
|
4558 |
%} |
|
4559 |
||
4560 |
instruct Repl16I_imm_evex(vecZ dst, immI con) %{ |
|
4561 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2); |
|
4562 |
match(Set dst (ReplicateI con)); |
|
4563 |
format %{ "movq $dst,[$constantaddress]\t! replicate16I($con)\n\t" |
|
4564 |
"vpbroadcastd $dst,$dst\t! replicate16I" %} |
|
4565 |
ins_encode %{ |
|
4566 |
int vector_len = 2; |
|
4567 |
__ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); |
|
51857 | 4568 |
__ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4569 |
%} |
4570 |
ins_pipe( pipe_slow ); |
|
4571 |
%} |
|
4572 |
||
4573 |
instruct Repl16I_zero_evex(vecZ dst, immI0 zero) %{ |
|
4574 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2); |
|
4575 |
match(Set dst (ReplicateI zero)); |
|
4576 |
format %{ "vpxor $dst k0,$dst,$dst\t! replicate16I zero" %} |
|
4577 |
ins_encode %{ |
|
4578 |
// Use vxorpd since AVX does not have vpxor for 512-bit (AVX2 will have it). |
|
4579 |
int vector_len = 2; |
|
4580 |
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
4581 |
%} |
|
4582 |
ins_pipe( fpu_reg_reg ); |
|
4583 |
%} |
|
4584 |
||
4585 |
// Replicate long (8 byte) scalar to be vector |
|
4586 |
#ifdef _LP64 |
|
4587 |
instruct Repl4L_evex(vecY dst, rRegL src) %{ |
|
51857 | 4588 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4589 |
match(Set dst (ReplicateL src)); |
51857 | 4590 |
format %{ "evpbroadcastq $dst,$src\t! replicate4L" %} |
31410 | 4591 |
ins_encode %{ |
4592 |
int vector_len = 1; |
|
4593 |
__ evpbroadcastq($dst$$XMMRegister, $src$$Register, vector_len); |
|
4594 |
%} |
|
4595 |
ins_pipe( pipe_slow ); |
|
4596 |
%} |
|
4597 |
||
4598 |
instruct Repl8L_evex(vecZ dst, rRegL src) %{ |
|
4599 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2); |
|
4600 |
match(Set dst (ReplicateL src)); |
|
51857 | 4601 |
format %{ "evpbroadcastq $dst,$src\t! replicate8L" %} |
31410 | 4602 |
ins_encode %{ |
4603 |
int vector_len = 2; |
|
4604 |
__ evpbroadcastq($dst$$XMMRegister, $src$$Register, vector_len); |
|
4605 |
%} |
|
4606 |
ins_pipe( pipe_slow ); |
|
4607 |
%} |
|
4608 |
#else // _LP64 |
|
4609 |
instruct Repl4L_evex(vecY dst, eRegL src, regD tmp) %{ |
|
51857 | 4610 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4611 |
match(Set dst (ReplicateL src)); |
4612 |
effect(TEMP dst, USE src, TEMP tmp); |
|
4613 |
format %{ "movdl $dst,$src.lo\n\t" |
|
4614 |
"movdl $tmp,$src.hi\n\t" |
|
4615 |
"punpckldq $dst,$tmp\n\t" |
|
4616 |
"vpbroadcastq $dst,$dst\t! replicate4L" %} |
|
4617 |
ins_encode %{ |
|
4618 |
int vector_len = 1; |
|
4619 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
4620 |
__ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); |
|
4621 |
__ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); |
|
51857 | 4622 |
__ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
4623 |
%} |
|
4624 |
ins_pipe( pipe_slow ); |
|
4625 |
%} |
|
4626 |
||
4627 |
instruct Repl8L_evex(legVecZ dst, eRegL src, legVecZ tmp) %{ |
|
31410 | 4628 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2); |
4629 |
match(Set dst (ReplicateL src)); |
|
4630 |
effect(TEMP dst, USE src, TEMP tmp); |
|
4631 |
format %{ "movdl $dst,$src.lo\n\t" |
|
4632 |
"movdl $tmp,$src.hi\n\t" |
|
4633 |
"punpckldq $dst,$tmp\n\t" |
|
4634 |
"vpbroadcastq $dst,$dst\t! replicate8L" %} |
|
4635 |
ins_encode %{ |
|
4636 |
int vector_len = 2; |
|
4637 |
__ movdl($dst$$XMMRegister, $src$$Register); |
|
4638 |
__ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); |
|
4639 |
__ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); |
|
51857 | 4640 |
__ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4641 |
%} |
4642 |
ins_pipe( pipe_slow ); |
|
4643 |
%} |
|
4644 |
#endif // _LP64 |
|
4645 |
||
4646 |
instruct Repl4L_imm_evex(vecY dst, immL con) %{ |
|
51857 | 4647 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4648 |
match(Set dst (ReplicateL con)); |
4649 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
4650 |
"vpbroadcastq $dst,$dst\t! replicate4L" %} |
|
4651 |
ins_encode %{ |
|
4652 |
int vector_len = 1; |
|
4653 |
__ movq($dst$$XMMRegister, $constantaddress($con)); |
|
51857 | 4654 |
__ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4655 |
%} |
4656 |
ins_pipe( pipe_slow ); |
|
4657 |
%} |
|
4658 |
||
4659 |
instruct Repl8L_imm_evex(vecZ dst, immL con) %{ |
|
4660 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2); |
|
4661 |
match(Set dst (ReplicateL con)); |
|
4662 |
format %{ "movq $dst,[$constantaddress]\n\t" |
|
4663 |
"vpbroadcastq $dst,$dst\t! replicate8L" %} |
|
4664 |
ins_encode %{ |
|
4665 |
int vector_len = 2; |
|
4666 |
__ movq($dst$$XMMRegister, $constantaddress($con)); |
|
51857 | 4667 |
__ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4668 |
%} |
4669 |
ins_pipe( pipe_slow ); |
|
4670 |
%} |
|
4671 |
||
4672 |
instruct Repl2L_mem_evex(vecX dst, memory mem) %{ |
|
51857 | 4673 |
predicate(n->as_Vector()->length() == 2 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4674 |
match(Set dst (ReplicateL (LoadL mem))); |
4675 |
format %{ "vpbroadcastd $dst,$mem\t! replicate2L" %} |
|
4676 |
ins_encode %{ |
|
4677 |
int vector_len = 0; |
|
51857 | 4678 |
__ vpbroadcastq($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4679 |
%} |
4680 |
ins_pipe( pipe_slow ); |
|
4681 |
%} |
|
4682 |
||
4683 |
instruct Repl4L_mem_evex(vecY dst, memory mem) %{ |
|
51857 | 4684 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4685 |
match(Set dst (ReplicateL (LoadL mem))); |
4686 |
format %{ "vpbroadcastd $dst,$mem\t! replicate4L" %} |
|
4687 |
ins_encode %{ |
|
4688 |
int vector_len = 1; |
|
51857 | 4689 |
__ vpbroadcastq($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4690 |
%} |
4691 |
ins_pipe( pipe_slow ); |
|
4692 |
%} |
|
4693 |
||
4694 |
instruct Repl8L_mem_evex(vecZ dst, memory mem) %{ |
|
4695 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2); |
|
4696 |
match(Set dst (ReplicateL (LoadL mem))); |
|
4697 |
format %{ "vpbroadcastd $dst,$mem\t! replicate8L" %} |
|
4698 |
ins_encode %{ |
|
4699 |
int vector_len = 2; |
|
51857 | 4700 |
__ vpbroadcastq($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4701 |
%} |
4702 |
ins_pipe( pipe_slow ); |
|
4703 |
%} |
|
4704 |
||
4705 |
instruct Repl8L_zero_evex(vecZ dst, immL0 zero) %{ |
|
4706 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2); |
|
4707 |
match(Set dst (ReplicateL zero)); |
|
4708 |
format %{ "vpxor $dst k0,$dst,$dst\t! replicate8L zero" %} |
|
4709 |
ins_encode %{ |
|
4710 |
// Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it). |
|
4711 |
int vector_len = 2; |
|
4712 |
__ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
4713 |
%} |
|
4714 |
ins_pipe( fpu_reg_reg ); |
|
4715 |
%} |
|
4716 |
||
4717 |
instruct Repl8F_evex(vecY dst, regF src) %{ |
|
51857 | 4718 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4719 |
match(Set dst (ReplicateF src)); |
51857 | 4720 |
format %{ "vpbroadcastss $dst,$src\t! replicate8F" %} |
31410 | 4721 |
ins_encode %{ |
4722 |
int vector_len = 1; |
|
51857 | 4723 |
__ vpbroadcastss($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
31410 | 4724 |
%} |
4725 |
ins_pipe( pipe_slow ); |
|
4726 |
%} |
|
4727 |
||
4728 |
instruct Repl8F_mem_evex(vecY dst, memory mem) %{ |
|
51857 | 4729 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4730 |
match(Set dst (ReplicateF (LoadF mem))); |
4731 |
format %{ "vbroadcastss $dst,$mem\t! replicate8F" %} |
|
4732 |
ins_encode %{ |
|
4733 |
int vector_len = 1; |
|
51857 | 4734 |
__ vpbroadcastss($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4735 |
%} |
4736 |
ins_pipe( pipe_slow ); |
|
4737 |
%} |
|
4738 |
||
4739 |
instruct Repl16F_evex(vecZ dst, regF src) %{ |
|
4740 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2); |
|
4741 |
match(Set dst (ReplicateF src)); |
|
51857 | 4742 |
format %{ "vpbroadcastss $dst,$src\t! replicate16F" %} |
31410 | 4743 |
ins_encode %{ |
4744 |
int vector_len = 2; |
|
51857 | 4745 |
__ vpbroadcastss($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
31410 | 4746 |
%} |
4747 |
ins_pipe( pipe_slow ); |
|
4748 |
%} |
|
4749 |
||
4750 |
instruct Repl16F_mem_evex(vecZ dst, memory mem) %{ |
|
4751 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2); |
|
4752 |
match(Set dst (ReplicateF (LoadF mem))); |
|
4753 |
format %{ "vbroadcastss $dst,$mem\t! replicate16F" %} |
|
4754 |
ins_encode %{ |
|
4755 |
int vector_len = 2; |
|
51857 | 4756 |
__ vpbroadcastss($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4757 |
%} |
4758 |
ins_pipe( pipe_slow ); |
|
4759 |
%} |
|
4760 |
||
4761 |
instruct Repl16F_zero_evex(vecZ dst, immF0 zero) %{ |
|
4762 |
predicate(n->as_Vector()->length() == 16 && UseAVX > 2); |
|
4763 |
match(Set dst (ReplicateF zero)); |
|
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
4764 |
format %{ "vpxor $dst k0,$dst,$dst\t! replicate16F zero" %} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
4765 |
ins_encode %{ |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
4766 |
// Use vpxor in place of vxorps since EVEX has a constriant on dq for vxorps: this is a 512-bit operation |
31410 | 4767 |
int vector_len = 2; |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
4768 |
__ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
31410 | 4769 |
%} |
4770 |
ins_pipe( fpu_reg_reg ); |
|
4771 |
%} |
|
4772 |
||
4773 |
instruct Repl4D_evex(vecY dst, regD src) %{ |
|
51857 | 4774 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4775 |
match(Set dst (ReplicateD src)); |
51857 | 4776 |
format %{ "vpbroadcastsd $dst,$src\t! replicate4D" %} |
31410 | 4777 |
ins_encode %{ |
4778 |
int vector_len = 1; |
|
51857 | 4779 |
__ vpbroadcastsd($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
31410 | 4780 |
%} |
4781 |
ins_pipe( pipe_slow ); |
|
4782 |
%} |
|
4783 |
||
4784 |
instruct Repl4D_mem_evex(vecY dst, memory mem) %{ |
|
51857 | 4785 |
predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); |
31410 | 4786 |
match(Set dst (ReplicateD (LoadD mem))); |
4787 |
format %{ "vbroadcastsd $dst,$mem\t! replicate4D" %} |
|
4788 |
ins_encode %{ |
|
4789 |
int vector_len = 1; |
|
51857 | 4790 |
__ vpbroadcastsd($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4791 |
%} |
4792 |
ins_pipe( pipe_slow ); |
|
4793 |
%} |
|
4794 |
||
4795 |
instruct Repl8D_evex(vecZ dst, regD src) %{ |
|
4796 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2); |
|
4797 |
match(Set dst (ReplicateD src)); |
|
51857 | 4798 |
format %{ "vpbroadcastsd $dst,$src\t! replicate8D" %} |
31410 | 4799 |
ins_encode %{ |
4800 |
int vector_len = 2; |
|
51857 | 4801 |
__ vpbroadcastsd($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
31410 | 4802 |
%} |
4803 |
ins_pipe( pipe_slow ); |
|
4804 |
%} |
|
4805 |
||
4806 |
instruct Repl8D_mem_evex(vecZ dst, memory mem) %{ |
|
4807 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2); |
|
4808 |
match(Set dst (ReplicateD (LoadD mem))); |
|
4809 |
format %{ "vbroadcastsd $dst,$mem\t! replicate8D" %} |
|
4810 |
ins_encode %{ |
|
4811 |
int vector_len = 2; |
|
51857 | 4812 |
__ vpbroadcastsd($dst$$XMMRegister, $mem$$Address, vector_len); |
31410 | 4813 |
%} |
4814 |
ins_pipe( pipe_slow ); |
|
4815 |
%} |
|
4816 |
||
4817 |
instruct Repl8D_zero_evex(vecZ dst, immD0 zero) %{ |
|
4818 |
predicate(n->as_Vector()->length() == 8 && UseAVX > 2); |
|
30624 | 4819 |
match(Set dst (ReplicateD zero)); |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
4820 |
format %{ "vpxor $dst k0,$dst,$dst,vect512\t! replicate8D zero" %} |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
4821 |
ins_encode %{ |
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
4822 |
// Use vpxor in place of vxorpd since EVEX has a constriant on dq for vxorpd: this is a 512-bit operation |
30624 | 4823 |
int vector_len = 2; |
32727
320855c2baef
8132160: support for AVX 512 call frames and stack management
mcberg
parents:
32723
diff
changeset
|
4824 |
__ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
13104
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4825 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4826 |
ins_pipe( fpu_reg_reg ); |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4827 |
%} |
657b387034fb
7119644: Increase superword's vector size up to 256 bits
kvn
parents:
11794
diff
changeset
|
4828 |
|
30211 | 4829 |
// ====================REDUCTION ARITHMETIC======================================= |
4830 |
||
51857 | 4831 |
instruct rsadd2I_reduction_reg(rRegI dst, rRegI src1, vecD src2, vecD tmp, vecD tmp2) %{ |
30211 | 4832 |
predicate(UseSSE > 2 && UseAVX == 0); |
4833 |
match(Set dst (AddReductionVI src1 src2)); |
|
4834 |
effect(TEMP tmp2, TEMP tmp); |
|
4835 |
format %{ "movdqu $tmp2,$src2\n\t" |
|
4836 |
"phaddd $tmp2,$tmp2\n\t" |
|
4837 |
"movd $tmp,$src1\n\t" |
|
4838 |
"paddd $tmp,$tmp2\n\t" |
|
4839 |
"movd $dst,$tmp\t! add reduction2I" %} |
|
4840 |
ins_encode %{ |
|
4841 |
__ movdqu($tmp2$$XMMRegister, $src2$$XMMRegister); |
|
4842 |
__ phaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister); |
|
4843 |
__ movdl($tmp$$XMMRegister, $src1$$Register); |
|
4844 |
__ paddd($tmp$$XMMRegister, $tmp2$$XMMRegister); |
|
4845 |
__ movdl($dst$$Register, $tmp$$XMMRegister); |
|
4846 |
%} |
|
4847 |
ins_pipe( pipe_slow ); |
|
4848 |
%} |
|
4849 |
||
51857 | 4850 |
instruct rvadd2I_reduction_reg(rRegI dst, rRegI src1, vecD src2, vecD tmp, vecD tmp2) %{ |
34162 | 4851 |
predicate(VM_Version::supports_avxonly()); |
30211 | 4852 |
match(Set dst (AddReductionVI src1 src2)); |
4853 |
effect(TEMP tmp, TEMP tmp2); |
|
30624 | 4854 |
format %{ "vphaddd $tmp,$src2,$src2\n\t" |
4855 |
"movd $tmp2,$src1\n\t" |
|
4856 |
"vpaddd $tmp2,$tmp2,$tmp\n\t" |
|
4857 |
"movd $dst,$tmp2\t! add reduction2I" %} |
|
4858 |
ins_encode %{ |
|
4859 |
int vector_len = 0; |
|
4860 |
__ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
4861 |
__ movdl($tmp2$$XMMRegister, $src1$$Register); |
|
4862 |
__ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, vector_len); |
|
4863 |
__ movdl($dst$$Register, $tmp2$$XMMRegister); |
|
4864 |
%} |
|
4865 |
ins_pipe( pipe_slow ); |
|
4866 |
%} |
|
4867 |
||
51857 | 4868 |
// Reduction: dst = src1 + (src2[0] + src2[1]) for 2 ints, EVEX (UseAVX > 2) path.
// Shuffles lane 1 down, adds lanes, then folds in the scalar accumulator.
instruct rvadd2I_reduction_reg_evex(rRegI dst, rRegI src1, vecD src2, vecD tmp, vecD tmp2) %{
  predicate(UseAVX > 2);
  match(Set dst (AddReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "pshufd  $tmp2,$src2,0x1\n\t"
            "vpaddd  $tmp,$src2,$tmp2\n\t"
            "movd    $tmp2,$src1\n\t"
            "vpaddd  $tmp2,$tmp,$tmp2\n\t"
            "movd    $dst,$tmp2\t! add reduction2I" %}
  ins_encode %{
    int vector_len = 0;   // 128-bit encoding
    __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
    __ vpaddd($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ vpaddd($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
4887 |
||
51857 | 4888 |
// Reduction: dst = src1 + sum of the 4 ints in src2, pure-SSE path (SSE3+ for phaddd).
// Two phaddd passes collapse 4 lanes -> 1, then the scalar accumulator is added.
instruct rsadd4I_reduction_reg(rRegI dst, rRegI src1, vecX src2, vecX tmp, vecX tmp2) %{
  predicate(UseSSE > 2 && UseAVX == 0);
  match(Set dst (AddReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "movdqu  $tmp,$src2\n\t"
            "phaddd  $tmp,$tmp\n\t"
            "phaddd  $tmp,$tmp\n\t"
            "movd    $tmp2,$src1\n\t"
            "paddd   $tmp2,$tmp\n\t"
            "movd    $dst,$tmp2\t! add reduction4I" %}
  ins_encode %{
    __ movdqu($tmp$$XMMRegister, $src2$$XMMRegister);
    __ phaddd($tmp$$XMMRegister, $tmp$$XMMRegister);
    __ phaddd($tmp$$XMMRegister, $tmp$$XMMRegister);
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ paddd($tmp2$$XMMRegister, $tmp$$XMMRegister);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
4908 |
||
51857 | 4909 |
// Reduction: dst = src1 + sum of the 4 ints in src2, AVX1-only path
// (two vphaddd passes collapse the lanes before adding the scalar accumulator).
instruct rvadd4I_reduction_reg(rRegI dst, rRegI src1, vecX src2, vecX tmp, vecX tmp2) %{
  predicate(VM_Version::supports_avxonly());
  match(Set dst (AddReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "vphaddd $tmp,$src2,$src2\n\t"
            "vphaddd $tmp,$tmp,$tmp\n\t"
            "movd    $tmp2,$src1\n\t"
            "vpaddd  $tmp2,$tmp2,$tmp\n\t"
            "movd    $dst,$tmp2\t! add reduction4I" %}
  ins_encode %{
    int vector_len = 0;   // 128-bit encoding
    __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, vector_len);
    __ vphaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp$$XMMRegister, vector_len);
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, vector_len);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
4928 |
||
51857 | 4929 |
// Reduction: dst = src1 + sum of the 4 ints in src2, EVEX (UseAVX > 2) path.
// log2 tree: fold upper half onto lower (0xE), then lane 1 onto lane 0 (0x1).
instruct rvadd4I_reduction_reg_evex(rRegI dst, rRegI src1, vecX src2, vecX tmp, vecX tmp2) %{
  predicate(UseAVX > 2);
  match(Set dst (AddReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "pshufd  $tmp2,$src2,0xE\n\t"
            "vpaddd  $tmp,$src2,$tmp2\n\t"
            "pshufd  $tmp2,$tmp,0x1\n\t"
            "vpaddd  $tmp,$tmp,$tmp2\n\t"
            "movd    $tmp2,$src1\n\t"
            "vpaddd  $tmp2,$tmp,$tmp2\n\t"
            "movd    $dst,$tmp2\t! add reduction4I" %}
  ins_encode %{
    int vector_len = 0;   // 128-bit encoding
    __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ vpaddd($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0x1);
    __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ vpaddd($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
4952 |
||
51857 | 4953 |
// Reduction: dst = src1 + sum of the 8 ints in 256-bit src2, AVX1-only path.
// 256-bit vphaddd passes, extract the high 128 bits, fold, then add the scalar.
instruct rvadd8I_reduction_reg(rRegI dst, rRegI src1, vecY src2, vecY tmp, vecY tmp2) %{
  predicate(VM_Version::supports_avxonly());
  match(Set dst (AddReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "vphaddd  $tmp,$src2,$src2\n\t"
            "vphaddd  $tmp,$tmp,$tmp2\n\t"
            "vextracti128_high  $tmp2,$tmp\n\t"
            "vpaddd   $tmp,$tmp,$tmp2\n\t"
            "movd     $tmp2,$src1\n\t"
            "vpaddd   $tmp2,$tmp2,$tmp\n\t"
            "movd     $dst,$tmp2\t! add reduction8I" %}
  ins_encode %{
    int vector_len = 1;   // 256-bit encoding for the horizontal adds
    __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, vector_len);
    __ vphaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ vextracti128_high($tmp2$$XMMRegister, $tmp$$XMMRegister);
    // remaining adds operate on 128 bits
    __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
4976 |
||
51857 | 4977 |
// Reduction: dst = src1 + sum of the 8 ints in 256-bit src2, EVEX (UseAVX > 2) path.
// Extract high 128 bits, then log2-fold within 128 bits (0xE, 0x1), add scalar last.
instruct rvadd8I_reduction_reg_evex(rRegI dst, rRegI src1, vecY src2, vecY tmp, vecY tmp2) %{
  predicate(UseAVX > 2);
  match(Set dst (AddReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "vextracti128_high  $tmp,$src2\n\t"
            "vpaddd  $tmp,$tmp,$src2\n\t"
            "pshufd  $tmp2,$tmp,0xE\n\t"
            "vpaddd  $tmp,$tmp,$tmp2\n\t"
            "pshufd  $tmp2,$tmp,0x1\n\t"
            "vpaddd  $tmp,$tmp,$tmp2\n\t"
            "movd    $tmp2,$src1\n\t"
            "vpaddd  $tmp2,$tmp,$tmp2\n\t"
            "movd    $dst,$tmp2\t! add reduction8I" %}
  ins_encode %{
    int vector_len = 0;   // all folds happen in 128 bits after the extract
    __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
    __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, vector_len);
    __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
    __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0x1);
    __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ vpaddd($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5004 |
||
51857 | 5005 |
// Reduction: dst = src1 + sum of the 16 ints in 512-bit src2, EVEX (UseAVX > 2) path.
// Fold 512->256 (vextracti64x4_high), 256->128 (vextracti128_high), then 0xE/0x1
// shuffles within 128 bits; add the scalar accumulator last.
instruct rvadd16I_reduction_reg_evex(rRegI dst, rRegI src1, legVecZ src2, legVecZ tmp, legVecZ tmp2, legVecZ tmp3) %{
  predicate(UseAVX > 2);
  match(Set dst (AddReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
  format %{ "vextracti64x4_high  $tmp3,$src2\n\t"
            "vpaddd  $tmp3,$tmp3,$src2\n\t"
            "vextracti128_high  $tmp,$tmp3\n\t"
            "vpaddd  $tmp,$tmp,$tmp3\n\t"
            "pshufd  $tmp2,$tmp,0xE\n\t"
            "vpaddd  $tmp,$tmp,$tmp2\n\t"
            "pshufd  $tmp2,$tmp,0x1\n\t"
            "vpaddd  $tmp,$tmp,$tmp2\n\t"
            "movd    $tmp2,$src1\n\t"
            "vpaddd  $tmp2,$tmp,$tmp2\n\t"
            "movd    $dst,$tmp2\t! add reduction16I" %}  // was "mul reduction16I": copy/paste typo, this rule is an add
  ins_encode %{
    __ vextracti64x4_high($tmp3$$XMMRegister, $src2$$XMMRegister);
    __ vpaddd($tmp3$$XMMRegister, $tmp3$$XMMRegister, $src2$$XMMRegister, 1); // 256-bit add
    __ vextracti128_high($tmp$$XMMRegister, $tmp3$$XMMRegister);
    __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp3$$XMMRegister, 0);  // 128-bit from here on
    __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
    __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
    __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0x1);
    __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ vpaddd($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5035 |
||
30624 | 5036 |
#ifdef _LP64 |
51857 | 5037 |
// Reduction: dst = src1 + (src2[0] + src2[1]) for 2 longs (64-bit VM only,
// guarded by the surrounding #ifdef _LP64), EVEX (UseAVX > 2) path.
instruct rvadd2L_reduction_reg(rRegL dst, rRegL src1, vecX src2, vecX tmp, vecX tmp2) %{
  predicate(UseAVX > 2);
  match(Set dst (AddReductionVL src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "pshufd  $tmp2,$src2,0xE\n\t"
            "vpaddq  $tmp,$src2,$tmp2\n\t"
            "movdq   $tmp2,$src1\n\t"
            "vpaddq  $tmp2,$tmp,$tmp2\n\t"
            "movdq   $dst,$tmp2\t! add reduction2L" %}
  ins_encode %{
    __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ vpaddq($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, 0);
    __ movdq($tmp2$$XMMRegister, $src1$$Register);
    __ vpaddq($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
    __ movdq($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5055 |
||
51857 | 5056 |
// Reduction: dst = src1 + sum of the 4 longs in 256-bit src2 (LP64, UseAVX > 2).
// Extract high 128 bits, fold, shuffle-fold the remaining pair, add scalar last.
instruct rvadd4L_reduction_reg(rRegL dst, rRegL src1, vecY src2, vecY tmp, vecY tmp2) %{
  predicate(UseAVX > 2);
  match(Set dst (AddReductionVL src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "vextracti128_high  $tmp,$src2\n\t"
            "vpaddq  $tmp2,$tmp,$src2\n\t"
            "pshufd  $tmp,$tmp2,0xE\n\t"
            "vpaddq  $tmp2,$tmp2,$tmp\n\t"
            "movdq   $tmp,$src1\n\t"
            "vpaddq  $tmp2,$tmp2,$tmp\n\t"
            "movdq   $dst,$tmp2\t! add reduction4L" %}
  ins_encode %{
    __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
    __ vpaddq($tmp2$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, 0);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
    __ movdq($tmp$$XMMRegister, $src1$$Register);
    __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
    __ movdq($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5078 |
||
51857 | 5079 |
// Reduction: dst = src1 + sum of the 8 longs in 512-bit src2 (LP64, UseAVX > 2).
// Fold 512->256->128, then the last pair with pshufd 0xE; add scalar last.
instruct rvadd8L_reduction_reg(rRegL dst, rRegL src1, legVecZ src2, legVecZ tmp, legVecZ tmp2) %{
  predicate(UseAVX > 2);
  match(Set dst (AddReductionVL src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "vextracti64x4_high  $tmp2,$src2\n\t"
            "vpaddq  $tmp2,$tmp2,$src2\n\t"
            "vextracti128_high  $tmp,$tmp2\n\t"
            "vpaddq  $tmp2,$tmp2,$tmp\n\t"
            "pshufd  $tmp,$tmp2,0xE\n\t"
            "vpaddq  $tmp2,$tmp2,$tmp\n\t"
            "movdq   $tmp,$src1\n\t"
            "vpaddq  $tmp2,$tmp2,$tmp\n\t"
            "movdq   $dst,$tmp2\t! add reduction8L" %}
  ins_encode %{
    __ vextracti64x4_high($tmp2$$XMMRegister, $src2$$XMMRegister);
    __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $src2$$XMMRegister, 1); // 256-bit add
    __ vextracti128_high($tmp$$XMMRegister, $tmp2$$XMMRegister);
    __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);  // 128-bit from here on
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
    __ movdq($tmp$$XMMRegister, $src1$$Register);
    __ vpaddq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
    __ movdq($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5105 |
#endif |
|
5106 |
||
51857 | 5107 |
// Reduction: dst += src2[0] + src2[1] for 2 floats, pure-SSE path.
// Note the accumulator is dst itself (match uses dst as the scalar input),
// and lanes are added sequentially to keep strict FP ordering.
instruct rsadd2F_reduction_reg(regF dst, vecD src2, vecD tmp) %{
  predicate(UseSSE >= 1 && UseAVX == 0);
  match(Set dst (AddReductionVF dst src2));
  effect(TEMP dst, TEMP tmp);
  format %{ "addss   $dst,$src2\n\t"
            "pshufd  $tmp,$src2,0x01\n\t"
            "addss   $dst,$tmp\t! add reduction2F" %}
  ins_encode %{
    __ addss($dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ addss($dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5121 |
||
51857 | 5122 |
// Reduction: dst += src2[0] + src2[1] for 2 floats, AVX path (vaddss forms).
// Lanes are added sequentially to keep strict FP ordering.
instruct rvadd2F_reduction_reg(regF dst, vecD src2, vecD tmp) %{
  predicate(UseAVX > 0);
  match(Set dst (AddReductionVF dst src2));
  effect(TEMP dst, TEMP tmp);
  format %{ "vaddss  $dst,$dst,$src2\n\t"
            "pshufd  $tmp,$src2,0x01\n\t"
            "vaddss  $dst,$dst,$tmp\t! add reduction2F" %}
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5136 |
||
51857 | 5137 |
// Reduction: dst += sum of the 4 floats in src2, pure-SSE path.
// Each lane is shuffled to position 0 and added with addss, in lane order.
instruct rsadd4F_reduction_reg(regF dst, vecX src2, vecX tmp) %{
  predicate(UseSSE >= 1 && UseAVX == 0);
  match(Set dst (AddReductionVF dst src2));
  effect(TEMP dst, TEMP tmp);
  format %{ "addss   $dst,$src2\n\t"
            "pshufd  $tmp,$src2,0x01\n\t"
            "addss   $dst,$tmp\n\t"
            "pshufd  $tmp,$src2,0x02\n\t"
            "addss   $dst,$tmp\n\t"
            "pshufd  $tmp,$src2,0x03\n\t"
            "addss   $dst,$tmp\t! add reduction4F" %}
  ins_encode %{
    __ addss($dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ addss($dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
    __ addss($dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
    __ addss($dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5159 |
||
51857 | 5160 |
// Reduction: dst += sum of the 4 floats in src2, AVX path.
// Format fix: first operand was printed "$dst,dst,$src2" (missing '$' on the
// second dst); the emitted code was always correct, only the debug text was wrong.
instruct rvadd4F_reduction_reg(regF dst, vecX src2, vecX tmp) %{
  predicate(UseAVX > 0);
  match(Set dst (AddReductionVF dst src2));
  effect(TEMP tmp, TEMP dst);
  format %{ "vaddss  $dst,$dst,$src2\n\t"
            "pshufd  $tmp,$src2,0x01\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$src2,0x02\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$src2,0x03\n\t"
            "vaddss  $dst,$dst,$tmp\t! add reduction4F" %}
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5182 |
||
51857 | 5183 |
// Reduction: dst += sum of the 8 floats in 256-bit src2, AVX path.
// Adds the low 4 lanes, extracts the high 128 bits, then adds those 4 lanes —
// strictly in lane order to preserve FP semantics.
instruct radd8F_reduction_reg(regF dst, vecY src2, vecY tmp, vecY tmp2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddReductionVF dst src2));
  effect(TEMP tmp, TEMP dst, TEMP tmp2);
  format %{ "vaddss  $dst,$dst,$src2\n\t"
            "pshufd  $tmp,$src2,0x01\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$src2,0x02\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$src2,0x03\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "vextractf128_high  $tmp2,$src2\n\t"
            "vaddss  $dst,$dst,$tmp2\n\t"
            "pshufd  $tmp,$tmp2,0x01\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$tmp2,0x02\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$tmp2,0x03\n\t"
            "vaddss  $dst,$dst,$tmp\t! add reduction8F" %}
  ins_encode %{
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ vextractf128_high($tmp2$$XMMRegister, $src2$$XMMRegister);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x02);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5221 |
||
51857 | 5222 |
// Reduction: dst += sum of the 16 floats in 512-bit src2, EVEX (UseAVX > 2) path.
// Processes each 128-bit quarter in lane order (vextractf32x4 0x1/0x2/0x3),
// adding every lane sequentially with vaddss to preserve FP semantics.
instruct radd16F_reduction_reg(regF dst, legVecZ src2, legVecZ tmp, legVecZ tmp2) %{
  predicate(UseAVX > 2);
  match(Set dst (AddReductionVF dst src2));
  effect(TEMP tmp, TEMP dst, TEMP tmp2);
  format %{ "vaddss  $dst,$dst,$src2\n\t"
            "pshufd  $tmp,$src2,0x01\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$src2,0x02\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$src2,0x03\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "vextractf32x4  $tmp2,$src2,0x1\n\t"
            "vaddss  $dst,$dst,$tmp2\n\t"
            "pshufd  $tmp,$tmp2,0x01\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$tmp2,0x02\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$tmp2,0x03\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "vextractf32x4  $tmp2,$src2,0x2\n\t"
            "vaddss  $dst,$dst,$tmp2\n\t"
            "pshufd  $tmp,$tmp2,0x01\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$tmp2,0x02\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$tmp2,0x03\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "vextractf32x4  $tmp2,$src2,0x3\n\t"
            "vaddss  $dst,$dst,$tmp2\n\t"
            "pshufd  $tmp,$tmp2,0x01\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$tmp2,0x02\n\t"
            "vaddss  $dst,$dst,$tmp\n\t"
            "pshufd  $tmp,$tmp2,0x03\n\t"
            "vaddss  $dst,$dst,$tmp\t! add reduction16F" %}
  ins_encode %{
    // quarter 0 (lanes 0-3, already in the low 128 bits)
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // quarter 1 (lanes 4-7)
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x02);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // quarter 2 (lanes 8-11)
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x02);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // quarter 3 (lanes 12-15)
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x02);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
    __ vaddss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5292 |
||
51857 | 5293 |
// Reduction: dst += src2[0] + src2[1] for 2 doubles, pure-SSE path.
instruct rsadd2D_reduction_reg(regD dst, vecX src2, vecX tmp) %{
  predicate(UseSSE >= 1 && UseAVX == 0);
  match(Set dst (AddReductionVD dst src2));
  effect(TEMP tmp, TEMP dst);
  format %{ "addsd   $dst,$src2\n\t"
            "pshufd  $tmp,$src2,0xE\n\t"
            "addsd   $dst,$tmp\t! add reduction2D" %}
  ins_encode %{
    __ addsd($dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ addsd($dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5307 |
||
51857 | 5308 |
// Reduction: dst += src2[0] + src2[1] for 2 doubles, AVX path (vaddsd forms).
instruct rvadd2D_reduction_reg(regD dst, vecX src2, vecX tmp) %{
  predicate(UseAVX > 0);
  match(Set dst (AddReductionVD dst src2));
  effect(TEMP tmp, TEMP dst);
  format %{ "vaddsd  $dst,$dst,$src2\n\t"
            "pshufd  $tmp,$src2,0xE\n\t"
            "vaddsd  $dst,$dst,$tmp\t! add reduction2D" %}
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5322 |
||
51857 | 5323 |
// Reduction: dst += sum of the 4 doubles in 256-bit src2, AVX path.
// Low pair added first, then the high 128 bits are extracted and added —
// in lane order to preserve FP semantics.
instruct rvadd4D_reduction_reg(regD dst, vecY src2, vecX tmp, vecX tmp2) %{
  predicate(UseAVX > 0);
  match(Set dst (AddReductionVD dst src2));
  effect(TEMP tmp, TEMP dst, TEMP tmp2);
  format %{ "vaddsd  $dst,$dst,$src2\n\t"
            "pshufd  $tmp,$src2,0xE\n\t"
            "vaddsd  $dst,$dst,$tmp\n\t"
            "vextractf128  $tmp2,$src2,0x1\n\t"
            "vaddsd  $dst,$dst,$tmp2\n\t"
            "pshufd  $tmp,$tmp2,0xE\n\t"
            "vaddsd  $dst,$dst,$tmp\t! add reduction4D" %}
  ins_encode %{
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ vextractf128($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5345 |
||
51857 | 5346 |
// Reduction: dst += sum of the 8 doubles in 512-bit src2, EVEX (UseAVX > 2) path.
// Processes each 128-bit quarter in lane order (vextractf32x4 0x1/0x2/0x3),
// adding both doubles of each quarter sequentially to preserve FP semantics.
instruct rvadd8D_reduction_reg(regD dst, legVecZ src2, legVecZ tmp, legVecZ tmp2) %{
  predicate(UseAVX > 2);
  match(Set dst (AddReductionVD dst src2));
  effect(TEMP tmp, TEMP dst, TEMP tmp2);
  format %{ "vaddsd  $dst,$dst,$src2\n\t"
            "pshufd  $tmp,$src2,0xE\n\t"
            "vaddsd  $dst,$dst,$tmp\n\t"
            "vextractf32x4  $tmp2,$src2,0x1\n\t"
            "vaddsd  $dst,$dst,$tmp2\n\t"
            "pshufd  $tmp,$tmp2,0xE\n\t"
            "vaddsd  $dst,$dst,$tmp\n\t"
            "vextractf32x4  $tmp2,$src2,0x2\n\t"
            "vaddsd  $dst,$dst,$tmp2\n\t"
            "pshufd  $tmp,$tmp2,0xE\n\t"
            "vaddsd  $dst,$dst,$tmp\n\t"
            "vextractf32x4  $tmp2,$src2,0x3\n\t"
            "vaddsd  $dst,$dst,$tmp2\n\t"
            "pshufd  $tmp,$tmp2,0xE\n\t"
            "vaddsd  $dst,$dst,$tmp\t! add reduction8D" %}
  ins_encode %{
    // quarter 0 (lanes 0-1, already in the low 128 bits)
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // quarter 1 (lanes 2-3)
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // quarter 2 (lanes 4-5)
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // quarter 3 (lanes 6-7)
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vaddsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5384 |
||
51857 | 5385 |
// Reduction: dst = src1 * (src2[0] * src2[1]) for 2 ints, pure-SSE path
// (SSE4.1+ for pmulld).
instruct rsmul2I_reduction_reg(rRegI dst, rRegI src1, vecD src2, vecD tmp, vecD tmp2) %{
  predicate(UseSSE > 3 && UseAVX == 0);
  match(Set dst (MulReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "pshufd  $tmp2,$src2,0x1\n\t"
            "pmulld  $tmp2,$src2\n\t"
            "movd    $tmp,$src1\n\t"
            "pmulld  $tmp2,$tmp\n\t"
            "movd    $dst,$tmp2\t! mul reduction2I" %}
  ins_encode %{
    __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
    __ pmulld($tmp2$$XMMRegister, $src2$$XMMRegister);
    __ movdl($tmp$$XMMRegister, $src1$$Register);
    __ pmulld($tmp2$$XMMRegister, $tmp$$XMMRegister);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5403 |
||
51857 | 5404 |
// Reduction: dst = src1 * (src2[0] * src2[1]) for 2 ints, AVX path.
instruct rvmul2I_reduction_reg(rRegI dst, rRegI src1, vecD src2, vecD tmp, vecD tmp2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "pshufd   $tmp2,$src2,0x1\n\t"
            "vpmulld  $tmp,$src2,$tmp2\n\t"
            "movd     $tmp2,$src1\n\t"
            "vpmulld  $tmp2,$tmp,$tmp2\n\t"
            "movd     $dst,$tmp2\t! mul reduction2I" %}
  ins_encode %{
    int vector_len = 0;   // 128-bit encoding
    __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
    __ vpmulld($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ vpmulld($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5423 |
||
51857 | 5424 |
// Multiply-reduction of a 4-element int vector into a scalar, SSE4.1-only
// path. Halves the live lane count with each pshufd/pmulld pair, then
// folds in the scalar accumulator src1.
instruct rsmul4I_reduction_reg(rRegI dst, rRegI src1, vecX src2, vecX tmp, vecX tmp2) %{
  predicate(UseSSE > 3 && UseAVX == 0);
  match(Set dst (MulReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "pshufd $tmp2,$src2,0xE\n\t"
            "pmulld $tmp2,$src2\n\t"
            "pshufd $tmp,$tmp2,0x1\n\t"
            "pmulld $tmp2,$tmp\n\t"
            "movd $tmp,$src1\n\t"
            "pmulld $tmp2,$tmp\n\t"
            "movd $dst,$tmp2\t! mul reduction4I" %}
  ins_encode %{
    // Upper half times lower half (lanes {2,3} * {0,1}).
    __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ pmulld($tmp2$$XMMRegister, $src2$$XMMRegister);
    // Remaining two partial products combined into lane 0.
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x1);
    __ pmulld($tmp2$$XMMRegister, $tmp$$XMMRegister);
    // Fold in the scalar accumulator src1.
    __ movdl($tmp$$XMMRegister, $src1$$Register);
    __ pmulld($tmp2$$XMMRegister, $tmp$$XMMRegister);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5446 |
||
51857 | 5447 |
// Multiply-reduction of a 4-element int vector into a scalar, AVX path.
instruct rvmul4I_reduction_reg(rRegI dst, rRegI src1, vecX src2, vecX tmp, vecX tmp2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "pshufd $tmp2,$src2,0xE\n\t"
            "vpmulld $tmp,$src2,$tmp2\n\t"
            "pshufd $tmp2,$tmp,0x1\n\t"
            "vpmulld $tmp,$tmp,$tmp2\n\t"
            "movd $tmp2,$src1\n\t"
            "vpmulld $tmp2,$tmp,$tmp2\n\t"
            "movd $dst,$tmp2\t! mul reduction4I" %}
  ins_encode %{
    int vlen = 0; // 128-bit encoding
    // Two halving steps: 4 lanes -> 2 lanes -> 1 lane.
    __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ vpmulld($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, vlen);
    __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0x1);
    __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vlen);
    // Fold in the scalar accumulator src1.
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ vpmulld($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vlen);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5470 |
||
51857 | 5471 |
// Multiply-reduction of an 8-element int vector (256-bit) into a scalar,
// AVX2 path. First folds the high 128-bit lane onto the low one, then
// reduces within 128 bits as in the 4I case.
instruct rvmul8I_reduction_reg(rRegI dst, rRegI src1, vecY src2, vecY tmp, vecY tmp2) %{
  predicate(UseAVX > 1);
  match(Set dst (MulReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "vextracti128_high $tmp,$src2\n\t"
            "vpmulld $tmp,$tmp,$src2\n\t"
            "pshufd $tmp2,$tmp,0xE\n\t"
            "vpmulld $tmp,$tmp,$tmp2\n\t"
            "pshufd $tmp2,$tmp,0x1\n\t"
            "vpmulld $tmp,$tmp,$tmp2\n\t"
            "movd $tmp2,$src1\n\t"
            "vpmulld $tmp2,$tmp,$tmp2\n\t"
            "movd $dst,$tmp2\t! mul reduction8I" %}
  ins_encode %{
    int vlen = 0; // 128-bit encoding for the in-lane steps
    // High 128 bits times low 128 bits.
    __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
    __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, vlen);
    // In-lane reduction: 4 lanes -> 2 -> 1.
    __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
    __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vlen);
    __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0x1);
    __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vlen);
    // Fold in the scalar accumulator src1.
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ vpmulld($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vlen);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5498 |
||
51857 | 5499 |
// Multiply-reduction of a 16-element int vector (512-bit) into a scalar,
// AVX-512 path. Folds 512 -> 256 -> 128 bits, then reduces in-lane and
// multiplies in the scalar accumulator src1.
// NOTE: the second vpmulld line of the format previously printed
// "$tmp,$tmp,$src2" while the encoding emits vpmulld(tmp, tmp, tmp3);
// the format is corrected here to match the generated code.
instruct rvmul16I_reduction_reg(rRegI dst, rRegI src1, legVecZ src2, legVecZ tmp, legVecZ tmp2, legVecZ tmp3) %{
  predicate(UseAVX > 2);
  match(Set dst (MulReductionVI src1 src2));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
  format %{ "vextracti64x4_high $tmp3,$src2\n\t"
            "vpmulld $tmp3,$tmp3,$src2\n\t"
            "vextracti128_high $tmp,$tmp3\n\t"
            "vpmulld $tmp,$tmp,$tmp3\n\t"
            "pshufd $tmp2,$tmp,0xE\n\t"
            "vpmulld $tmp,$tmp,$tmp2\n\t"
            "pshufd $tmp2,$tmp,0x1\n\t"
            "vpmulld $tmp,$tmp,$tmp2\n\t"
            "movd $tmp2,$src1\n\t"
            "vpmulld $tmp2,$tmp,$tmp2\n\t"
            "movd $dst,$tmp2\t! mul reduction16I" %}
  ins_encode %{
    // High 256 bits times low 256 bits (vector_len 1 = 256-bit op).
    __ vextracti64x4_high($tmp3$$XMMRegister, $src2$$XMMRegister);
    __ vpmulld($tmp3$$XMMRegister, $tmp3$$XMMRegister, $src2$$XMMRegister, 1);
    // High 128 bits times low 128 bits of the 256-bit partial product.
    __ vextracti128_high($tmp$$XMMRegister, $tmp3$$XMMRegister);
    __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp3$$XMMRegister, 0);
    // In-lane reduction: 4 lanes -> 2 -> 1.
    __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0xE);
    __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
    __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0x1);
    __ vpmulld($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
    // Fold in the scalar accumulator src1.
    __ movdl($tmp2$$XMMRegister, $src1$$Register);
    __ vpmulld($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
    __ movdl($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5529 |
||
30624 | 5530 |
#ifdef _LP64 |
51857 | 5531 |
// Multiply-reduction of a 2-element long vector into a scalar. vpmullq
// needs AVX-512DQ, hence the predicate.
instruct rvmul2L_reduction_reg(rRegL dst, rRegL src1, vecX src2, vecX tmp, vecX tmp2) %{
  predicate(UseAVX > 2 && VM_Version::supports_avx512dq());
  match(Set dst (MulReductionVL src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "pshufd $tmp2,$src2,0xE\n\t"
            "vpmullq $tmp,$src2,$tmp2\n\t"
            "movdq $tmp2,$src1\n\t"
            "vpmullq $tmp2,$tmp,$tmp2\n\t"
            "movdq $dst,$tmp2\t! mul reduction2L" %}
  ins_encode %{
    // Combine the two long lanes, then fold in the scalar src1.
    __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ vpmullq($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, 0);
    __ movdq($tmp2$$XMMRegister, $src1$$Register);
    __ vpmullq($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, 0);
    __ movdq($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5549 |
||
51857 | 5550 |
// Multiply-reduction of a 4-element long vector (256-bit) into a scalar.
// Requires AVX-512DQ for vpmullq.
instruct rvmul4L_reduction_reg(rRegL dst, rRegL src1, vecY src2, vecY tmp, vecY tmp2) %{
  predicate(UseAVX > 2 && VM_Version::supports_avx512dq());
  match(Set dst (MulReductionVL src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "vextracti128_high $tmp,$src2\n\t"
            "vpmullq $tmp2,$tmp,$src2\n\t"
            "pshufd $tmp,$tmp2,0xE\n\t"
            "vpmullq $tmp2,$tmp2,$tmp\n\t"
            "movdq $tmp,$src1\n\t"
            "vpmullq $tmp2,$tmp2,$tmp\n\t"
            "movdq $dst,$tmp2\t! mul reduction4L" %}
  ins_encode %{
    // High 128 bits times low 128 bits.
    __ vextracti128_high($tmp$$XMMRegister, $src2$$XMMRegister);
    __ vpmullq($tmp2$$XMMRegister, $tmp$$XMMRegister, $src2$$XMMRegister, 0);
    // Combine the two remaining long lanes.
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
    // Fold in the scalar accumulator src1.
    __ movdq($tmp$$XMMRegister, $src1$$Register);
    __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
    __ movdq($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5572 |
||
51857 | 5573 |
// Multiply-reduction of an 8-element long vector (512-bit) into a scalar.
// Folds 512 -> 256 -> 128 bits, then combines the last two lanes and the
// scalar accumulator. Requires AVX-512DQ for vpmullq.
instruct rvmul8L_reduction_reg(rRegL dst, rRegL src1, legVecZ src2, legVecZ tmp, legVecZ tmp2) %{
  predicate(UseAVX > 2 && VM_Version::supports_avx512dq());
  match(Set dst (MulReductionVL src1 src2));
  effect(TEMP tmp, TEMP tmp2);
  format %{ "vextracti64x4_high $tmp2,$src2\n\t"
            "vpmullq $tmp2,$tmp2,$src2\n\t"
            "vextracti128_high $tmp,$tmp2\n\t"
            "vpmullq $tmp2,$tmp2,$tmp\n\t"
            "pshufd $tmp,$tmp2,0xE\n\t"
            "vpmullq $tmp2,$tmp2,$tmp\n\t"
            "movdq $tmp,$src1\n\t"
            "vpmullq $tmp2,$tmp2,$tmp\n\t"
            "movdq $dst,$tmp2\t! mul reduction8L" %}
  ins_encode %{
    // High 256 bits times low 256 bits (vector_len 1 = 256-bit op).
    __ vextracti64x4_high($tmp2$$XMMRegister, $src2$$XMMRegister);
    __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $src2$$XMMRegister, 1);
    // High 128 bits times low 128 bits of the partial product.
    __ vextracti128_high($tmp$$XMMRegister, $tmp2$$XMMRegister);
    __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
    // Combine the last two long lanes.
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
    // Fold in the scalar accumulator src1.
    __ movdq($tmp$$XMMRegister, $src1$$Register);
    __ vpmullq($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, 0);
    __ movdq($dst$$Register, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5599 |
#endif |
|
5600 |
||
51857 | 5601 |
// Multiply-reduction of a 2-element float vector, SSE-only path.
// dst doubles as the incoming scalar accumulator (match uses "dst" as
// the first input), so it is both read and clobbered.
instruct rsmul2F_reduction(regF dst, vecD src2, vecD tmp) %{
  predicate(UseSSE >= 1 && UseAVX == 0);
  match(Set dst (MulReductionVF dst src2));
  effect(TEMP dst, TEMP tmp);
  format %{ "mulss $dst,$src2\n\t"
            "pshufd $tmp,$src2,0x01\n\t"
            "mulss $dst,$tmp\t! mul reduction2F" %}
  ins_encode %{
    // Multiply each lane into dst in order: lane 0, then lane 1.
    __ mulss($dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ mulss($dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5615 |
||
51857 | 5616 |
// Multiply-reduction of a 2-element float vector, AVX path.
// dst is the incoming scalar accumulator and the result.
instruct rvmul2F_reduction_reg(regF dst, vecD src2, vecD tmp) %{
  predicate(UseAVX > 0);
  match(Set dst (MulReductionVF dst src2));
  effect(TEMP tmp, TEMP dst);
  format %{ "vmulss $dst,$dst,$src2\n\t"
            "pshufd $tmp,$src2,0x01\n\t"
            "vmulss $dst,$dst,$tmp\t! mul reduction2F" %}
  ins_encode %{
    // Multiply each lane into dst in order: lane 0, then lane 1.
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5630 |
||
51857 | 5631 |
// Multiply-reduction of a 4-element float vector, SSE-only path.
// Lanes are multiplied into dst strictly in order (0..3) to keep the
// floating-point result deterministic.
instruct rsmul4F_reduction_reg(regF dst, vecX src2, vecX tmp) %{
  predicate(UseSSE >= 1 && UseAVX == 0);
  match(Set dst (MulReductionVF dst src2));
  effect(TEMP dst, TEMP tmp);
  format %{ "mulss $dst,$src2\n\t"
            "pshufd $tmp,$src2,0x01\n\t"
            "mulss $dst,$tmp\n\t"
            "pshufd $tmp,$src2,0x02\n\t"
            "mulss $dst,$tmp\n\t"
            "pshufd $tmp,$src2,0x03\n\t"
            "mulss $dst,$tmp\t! mul reduction4F" %}
  ins_encode %{
    __ mulss($dst$$XMMRegister, $src2$$XMMRegister);
    // Rotate each remaining lane down to lane 0 and multiply it in.
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ mulss($dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
    __ mulss($dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
    __ mulss($dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5653 |
||
51857 | 5654 |
// Multiply-reduction of a 4-element float vector, AVX path. Lanes are
// multiplied into dst in order to keep the FP result deterministic.
instruct rvmul4F_reduction_reg(regF dst, vecX src2, vecX tmp) %{
  predicate(UseAVX > 0);
  match(Set dst (MulReductionVF dst src2));
  effect(TEMP tmp, TEMP dst);
  format %{ "vmulss $dst,$dst,$src2\n\t"
            "pshufd $tmp,$src2,0x01\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$src2,0x02\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$src2,0x03\n\t"
            "vmulss $dst,$dst,$tmp\t! mul reduction4F" %}
  ins_encode %{
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    // Rotate each remaining lane down to lane 0 and multiply it in.
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5676 |
||
51857 | 5677 |
// Multiply-reduction of an 8-element float vector (256-bit), AVX path.
// Processes the low 128-bit lane, extracts the high lane, and processes
// it the same way; strict in-order lane multiplication keeps the FP
// result deterministic.
instruct rvmul8F_reduction_reg(regF dst, vecY src2, vecY tmp, vecY tmp2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulReductionVF dst src2));
  effect(TEMP tmp, TEMP dst, TEMP tmp2);
  format %{ "vmulss $dst,$dst,$src2\n\t"
            "pshufd $tmp,$src2,0x01\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$src2,0x02\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$src2,0x03\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "vextractf128_high $tmp2,$src2\n\t"
            "vmulss $dst,$dst,$tmp2\n\t"
            "pshufd $tmp,$tmp2,0x01\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$tmp2,0x02\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$tmp2,0x03\n\t"
            "vmulss $dst,$dst,$tmp\t! mul reduction8F" %}
  ins_encode %{
    // Lanes 0-3 from the low 128 bits.
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // Lanes 4-7 from the high 128 bits.
    __ vextractf128_high($tmp2$$XMMRegister, $src2$$XMMRegister);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x02);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5715 |
||
51857 | 5716 |
// Multiply-reduction of a 16-element float vector (512-bit), AVX-512
// path. Each 128-bit quarter of src2 is brought down with vextractf32x4
// and its four lanes multiplied into dst in order, keeping the FP result
// deterministic.
instruct rvmul16F_reduction_reg(regF dst, legVecZ src2, legVecZ tmp, legVecZ tmp2) %{
  predicate(UseAVX > 2);
  match(Set dst (MulReductionVF dst src2));
  effect(TEMP tmp, TEMP dst, TEMP tmp2);
  format %{ "vmulss $dst,$dst,$src2\n\t"
            "pshufd $tmp,$src2,0x01\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$src2,0x02\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$src2,0x03\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "vextractf32x4 $tmp2,$src2,0x1\n\t"
            "vmulss $dst,$dst,$tmp2\n\t"
            "pshufd $tmp,$tmp2,0x01\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$tmp2,0x02\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$tmp2,0x03\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "vextractf32x4 $tmp2,$src2,0x2\n\t"
            "vmulss $dst,$dst,$tmp2\n\t"
            "pshufd $tmp,$tmp2,0x01\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$tmp2,0x02\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$tmp2,0x03\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "vextractf32x4 $tmp2,$src2,0x3\n\t"
            "vmulss $dst,$dst,$tmp2\n\t"
            "pshufd $tmp,$tmp2,0x01\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$tmp2,0x02\n\t"
            "vmulss $dst,$dst,$tmp\n\t"
            "pshufd $tmp,$tmp2,0x03\n\t"
            "vmulss $dst,$dst,$tmp\t! mul reduction16F" %}
  ins_encode %{
    // Quarter 0 (lanes 0-3): already in the low 128 bits of src2.
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x01);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x02);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0x03);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // Quarter 1 (lanes 4-7).
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x02);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // Quarter 2 (lanes 8-11).
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x02);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // Quarter 3 (lanes 12-15).
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x01);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x02);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0x03);
    __ vmulss($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5786 |
||
51857 | 5787 |
// Multiply-reduction of a 2-element double vector, SSE-only path.
// dst is the incoming scalar accumulator and the result.
instruct rsmul2D_reduction_reg(regD dst, vecX src2, vecX tmp) %{
  predicate(UseSSE >= 1 && UseAVX == 0);
  match(Set dst (MulReductionVD dst src2));
  effect(TEMP dst, TEMP tmp);
  format %{ "mulsd $dst,$src2\n\t"
            "pshufd $tmp,$src2,0xE\n\t"
            "mulsd $dst,$tmp\t! mul reduction2D" %}
  ins_encode %{
    // Multiply lane 0 into dst, then bring lane 1 down and multiply it in.
    __ mulsd($dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ mulsd($dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5801 |
||
51857 | 5802 |
// Multiply-reduction of a 2-element double vector, AVX path.
// dst is the incoming scalar accumulator and the result.
instruct rvmul2D_reduction_reg(regD dst, vecX src2, vecX tmp) %{
  predicate(UseAVX > 0);
  match(Set dst (MulReductionVD dst src2));
  effect(TEMP tmp, TEMP dst);
  format %{ "vmulsd $dst,$dst,$src2\n\t"
            "pshufd $tmp,$src2,0xE\n\t"
            "vmulsd $dst,$dst,$tmp\t! mul reduction2D" %}
  ins_encode %{
    // Multiply lane 0 into dst, then bring lane 1 down and multiply it in.
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5816 |
||
51857 | 5817 |
// Multiply-reduction of a 4-element double vector (256-bit), AVX path.
// Low two lanes first, then the high 128-bit half, in order.
instruct rvmul4D_reduction_reg(regD dst, vecY src2, vecY tmp, vecY tmp2) %{
  predicate(UseAVX > 0);
  match(Set dst (MulReductionVD dst src2));
  effect(TEMP tmp, TEMP dst, TEMP tmp2);
  format %{ "vmulsd $dst,$dst,$src2\n\t"
            "pshufd $tmp,$src2,0xE\n\t"
            "vmulsd $dst,$dst,$tmp\n\t"
            "vextractf128_high $tmp2,$src2\n\t"
            "vmulsd $dst,$dst,$tmp2\n\t"
            "pshufd $tmp,$tmp2,0xE\n\t"
            "vmulsd $dst,$dst,$tmp\t! mul reduction4D" %}
  ins_encode %{
    // Lanes 0 and 1 from the low 128 bits.
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // Lanes 2 and 3 from the high 128 bits.
    __ vextractf128_high($tmp2$$XMMRegister, $src2$$XMMRegister);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5839 |
||
51857 | 5840 |
// Multiply-reduction of an 8-element double vector (512-bit), AVX-512
// path. Each 128-bit quarter is brought down with vextractf32x4 and its
// two lanes multiplied into dst in order.
// NOTE: the pshufd line after the 0x1 extract previously printed
// "$tmp,$src2,0xE" while the encoding emits pshufd(tmp, tmp2, 0xE);
// the format is corrected here to match the generated code (and the
// 0x2/0x3 stages, which already printed $tmp2).
instruct rvmul8D_reduction_reg(regD dst, legVecZ src2, legVecZ tmp, legVecZ tmp2) %{
  predicate(UseAVX > 2);
  match(Set dst (MulReductionVD dst src2));
  effect(TEMP tmp, TEMP dst, TEMP tmp2);
  format %{ "vmulsd $dst,$dst,$src2\n\t"
            "pshufd $tmp,$src2,0xE\n\t"
            "vmulsd $dst,$dst,$tmp\n\t"
            "vextractf32x4 $tmp2,$src2,0x1\n\t"
            "vmulsd $dst,$dst,$tmp2\n\t"
            "pshufd $tmp,$tmp2,0xE\n\t"
            "vmulsd $dst,$dst,$tmp\n\t"
            "vextractf32x4 $tmp2,$src2,0x2\n\t"
            "vmulsd $dst,$dst,$tmp2\n\t"
            "pshufd $tmp,$tmp2,0xE\n\t"
            "vmulsd $dst,$dst,$tmp\n\t"
            "vextractf32x4 $tmp2,$src2,0x3\n\t"
            "vmulsd $dst,$dst,$tmp2\n\t"
            "pshufd $tmp,$tmp2,0xE\n\t"
            "vmulsd $dst,$dst,$tmp\t! mul reduction8D" %}
  ins_encode %{
    // Lanes 0-1 from the low 128 bits of src2.
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $src2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $src2$$XMMRegister, 0xE);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // Lanes 2-3.
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // Lanes 4-5.
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x2);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
    // Lanes 6-7.
    __ vextractf32x4($tmp2$$XMMRegister, $src2$$XMMRegister, 0x3);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister);
    __ pshufd($tmp$$XMMRegister, $tmp2$$XMMRegister, 0xE);
    __ vmulsd($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
5878 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5879 |
// ====================VECTOR ARITHMETIC======================================= |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5880 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5881 |
// --------------------------------- ADD -------------------------------------- |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5882 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5883 |
// Bytes vector add |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5884 |
// Packed add of 4 bytes, SSE path (destructive two-operand form).
instruct vadd4B(vecS dst, vecS src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVB dst src));
  format %{ "paddb $dst,$src\t! add packed4B" %}
  ins_encode %{
    __ paddb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5893 |
|
51857 | 5894 |
// Packed add of 4 bytes, AVX path (non-destructive three-operand form).
instruct vadd4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed4B" %}
  ins_encode %{
    int vlen = 0; // 128-bit encoding
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5904 |
|
51857 | 5905 |
|
5906 |
// Packed add of 4 bytes with a memory operand, AVX path.
instruct vadd4B_mem(vecS dst, vecS src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVB src (LoadVector mem)));
  format %{ "vpaddb $dst,$src,$mem\t! add packed4B" %}
  ins_encode %{
    int vlen = 0; // 128-bit encoding
    __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
5916 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5917 |
instruct vadd8B(vecD dst, vecD src) %{ |
34162 | 5918 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 8); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5919 |
match(Set dst (AddVB dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5920 |
format %{ "paddb $dst,$src\t! add packed8B" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5921 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5922 |
__ paddb($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5923 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5924 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5925 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5926 |
|
51857 | 5927 |
instruct vadd8B_reg(vecD dst, vecD src1, vecD src2) %{ |
5928 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5929 |
match(Set dst (AddVB src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5930 |
format %{ "vpaddb $dst,$src1,$src2\t! add packed8B" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5931 |
ins_encode %{ |
30624 | 5932 |
int vector_len = 0; |
5933 |
__ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5934 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5935 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5936 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5937 |
|
51857 | 5938 |
|
5939 |
instruct vadd8B_mem(vecD dst, vecD src, memory mem) %{ |
|
5940 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
|
31410 | 5941 |
match(Set dst (AddVB src (LoadVector mem))); |
5942 |
format %{ "vpaddb $dst,$src,$mem\t! add packed8B" %} |
|
5943 |
ins_encode %{ |
|
5944 |
int vector_len = 0; |
|
5945 |
__ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
5946 |
%} |
|
5947 |
ins_pipe( pipe_slow ); |
|
5948 |
%} |
|
5949 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5950 |
instruct vadd16B(vecX dst, vecX src) %{ |
34162 | 5951 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 16); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5952 |
match(Set dst (AddVB dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5953 |
format %{ "paddb $dst,$src\t! add packed16B" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5954 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5955 |
__ paddb($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5956 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5957 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5958 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5959 |
|
51857 | 5960 |
instruct vadd16B_reg(vecX dst, vecX src1, vecX src2) %{ |
5961 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 16); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5962 |
match(Set dst (AddVB src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5963 |
format %{ "vpaddb $dst,$src1,$src2\t! add packed16B" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5964 |
ins_encode %{ |
30624 | 5965 |
int vector_len = 0; |
5966 |
__ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5967 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5968 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5969 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5970 |
|
51857 | 5971 |
instruct vadd16B_mem(vecX dst, vecX src, memory mem) %{ |
5972 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 16); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5973 |
match(Set dst (AddVB src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5974 |
format %{ "vpaddb $dst,$src,$mem\t! add packed16B" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5975 |
ins_encode %{ |
30624 | 5976 |
int vector_len = 0; |
5977 |
__ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5978 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5979 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5980 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5981 |
|
51857 | 5982 |
instruct vadd32B_reg(vecY dst, vecY src1, vecY src2) %{ |
5983 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 32); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5984 |
match(Set dst (AddVB src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5985 |
format %{ "vpaddb $dst,$src1,$src2\t! add packed32B" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5986 |
ins_encode %{ |
30624 | 5987 |
int vector_len = 1; |
5988 |
__ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5989 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5990 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5991 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5992 |
|
51857 | 5993 |
instruct vadd32B_mem(vecY dst, vecY src, memory mem) %{ |
5994 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 32); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5995 |
match(Set dst (AddVB src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5996 |
format %{ "vpaddb $dst,$src,$mem\t! add packed32B" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
5997 |
ins_encode %{ |
30624 | 5998 |
int vector_len = 1; |
5999 |
__ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
6000 |
%} |
|
6001 |
ins_pipe( pipe_slow ); |
|
6002 |
%} |
|
6003 |
||
6004 |
instruct vadd64B_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
51857 | 6005 |
predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 64); |
30624 | 6006 |
match(Set dst (AddVB src1 src2)); |
6007 |
format %{ "vpaddb $dst,$src1,$src2\t! add packed64B" %} |
|
6008 |
ins_encode %{ |
|
6009 |
int vector_len = 2; |
|
6010 |
__ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
6011 |
%} |
|
6012 |
ins_pipe( pipe_slow ); |
|
6013 |
%} |
|
6014 |
||
6015 |
instruct vadd64B_mem(vecZ dst, vecZ src, memory mem) %{ |
|
51857 | 6016 |
predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 64); |
30624 | 6017 |
match(Set dst (AddVB src (LoadVector mem))); |
6018 |
format %{ "vpaddb $dst,$src,$mem\t! add packed64B" %} |
|
6019 |
ins_encode %{ |
|
6020 |
int vector_len = 2; |
|
6021 |
__ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6022 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6023 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6024 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6025 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6026 |
// Shorts/Chars vector add |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6027 |
instruct vadd2S(vecS dst, vecS src) %{ |
34162 | 6028 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 2); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6029 |
match(Set dst (AddVS dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6030 |
format %{ "paddw $dst,$src\t! add packed2S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6031 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6032 |
__ paddw($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6033 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6034 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6035 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6036 |
|
51857 | 6037 |
instruct vadd2S_reg(vecS dst, vecS src1, vecS src2) %{ |
6038 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6039 |
match(Set dst (AddVS src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6040 |
format %{ "vpaddw $dst,$src1,$src2\t! add packed2S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6041 |
ins_encode %{ |
30624 | 6042 |
int vector_len = 0; |
6043 |
__ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6044 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6045 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6046 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6047 |
|
51857 | 6048 |
instruct vadd2S_mem(vecS dst, vecS src, memory mem) %{ |
6049 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
|
31410 | 6050 |
match(Set dst (AddVS src (LoadVector mem))); |
6051 |
format %{ "vpaddw $dst,$src,$mem\t! add packed2S" %} |
|
6052 |
ins_encode %{ |
|
6053 |
int vector_len = 0; |
|
6054 |
__ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
6055 |
%} |
|
6056 |
ins_pipe( pipe_slow ); |
|
6057 |
%} |
|
6058 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6059 |
instruct vadd4S(vecD dst, vecD src) %{ |
34162 | 6060 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 4); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6061 |
match(Set dst (AddVS dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6062 |
format %{ "paddw $dst,$src\t! add packed4S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6063 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6064 |
__ paddw($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6065 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6066 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6067 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6068 |
|
51857 | 6069 |
instruct vadd4S_reg(vecD dst, vecD src1, vecD src2) %{ |
6070 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6071 |
match(Set dst (AddVS src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6072 |
format %{ "vpaddw $dst,$src1,$src2\t! add packed4S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6073 |
ins_encode %{ |
30624 | 6074 |
int vector_len = 0; |
6075 |
__ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6076 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6077 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6078 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6079 |
|
51857 | 6080 |
instruct vadd4S_mem(vecD dst, vecD src, memory mem) %{ |
6081 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
|
31410 | 6082 |
match(Set dst (AddVS src (LoadVector mem))); |
6083 |
format %{ "vpaddw $dst,$src,$mem\t! add packed4S" %} |
|
6084 |
ins_encode %{ |
|
6085 |
int vector_len = 0; |
|
6086 |
__ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
6087 |
%} |
|
6088 |
ins_pipe( pipe_slow ); |
|
6089 |
%} |
|
6090 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6091 |
instruct vadd8S(vecX dst, vecX src) %{ |
34162 | 6092 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 8); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6093 |
match(Set dst (AddVS dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6094 |
format %{ "paddw $dst,$src\t! add packed8S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6095 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6096 |
__ paddw($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6097 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6098 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6099 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6100 |
|
51857 | 6101 |
instruct vadd8S_reg(vecX dst, vecX src1, vecX src2) %{ |
6102 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6103 |
match(Set dst (AddVS src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6104 |
format %{ "vpaddw $dst,$src1,$src2\t! add packed8S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6105 |
ins_encode %{ |
30624 | 6106 |
int vector_len = 0; |
6107 |
__ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6108 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6109 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6110 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6111 |
|
51857 | 6112 |
instruct vadd8S_mem(vecX dst, vecX src, memory mem) %{ |
6113 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6114 |
match(Set dst (AddVS src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6115 |
format %{ "vpaddw $dst,$src,$mem\t! add packed8S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6116 |
ins_encode %{ |
30624 | 6117 |
int vector_len = 0; |
6118 |
__ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6119 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6120 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6121 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6122 |
|
51857 | 6123 |
instruct vadd16S_reg(vecY dst, vecY src1, vecY src2) %{ |
6124 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6125 |
match(Set dst (AddVS src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6126 |
format %{ "vpaddw $dst,$src1,$src2\t! add packed16S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6127 |
ins_encode %{ |
30624 | 6128 |
int vector_len = 1; |
6129 |
__ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6130 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6131 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6132 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6133 |
|
51857 | 6134 |
instruct vadd16S_mem(vecY dst, vecY src, memory mem) %{ |
6135 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6136 |
match(Set dst (AddVS src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6137 |
format %{ "vpaddw $dst,$src,$mem\t! add packed16S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6138 |
ins_encode %{ |
30624 | 6139 |
int vector_len = 1; |
6140 |
__ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
6141 |
%} |
|
6142 |
ins_pipe( pipe_slow ); |
|
6143 |
%} |
|
6144 |
||
6145 |
instruct vadd32S_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
51857 | 6146 |
predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 32); |
30624 | 6147 |
match(Set dst (AddVS src1 src2)); |
6148 |
format %{ "vpaddw $dst,$src1,$src2\t! add packed32S" %} |
|
6149 |
ins_encode %{ |
|
6150 |
int vector_len = 2; |
|
6151 |
__ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
6152 |
%} |
|
6153 |
ins_pipe( pipe_slow ); |
|
6154 |
%} |
|
6155 |
||
6156 |
instruct vadd32S_mem(vecZ dst, vecZ src, memory mem) %{ |
|
51857 | 6157 |
predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 32); |
30624 | 6158 |
match(Set dst (AddVS src (LoadVector mem))); |
6159 |
format %{ "vpaddw $dst,$src,$mem\t! add packed32S" %} |
|
6160 |
ins_encode %{ |
|
6161 |
int vector_len = 2; |
|
6162 |
__ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6163 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6164 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6165 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6166 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6167 |
// Integers vector add |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6168 |
instruct vadd2I(vecD dst, vecD src) %{ |
51857 | 6169 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 2); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6170 |
match(Set dst (AddVI dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6171 |
format %{ "paddd $dst,$src\t! add packed2I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6172 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6173 |
__ paddd($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6174 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6175 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6176 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6177 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6178 |
instruct vadd2I_reg(vecD dst, vecD src1, vecD src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6179 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6180 |
match(Set dst (AddVI src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6181 |
format %{ "vpaddd $dst,$src1,$src2\t! add packed2I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6182 |
ins_encode %{ |
30624 | 6183 |
int vector_len = 0; |
6184 |
__ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6185 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6186 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6187 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6188 |
|
31410 | 6189 |
instruct vadd2I_mem(vecD dst, vecD src, memory mem) %{ |
6190 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
|
6191 |
match(Set dst (AddVI src (LoadVector mem))); |
|
6192 |
format %{ "vpaddd $dst,$src,$mem\t! add packed2I" %} |
|
6193 |
ins_encode %{ |
|
6194 |
int vector_len = 0; |
|
6195 |
__ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
6196 |
%} |
|
6197 |
ins_pipe( pipe_slow ); |
|
6198 |
%} |
|
6199 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6200 |
instruct vadd4I(vecX dst, vecX src) %{ |
51857 | 6201 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 4); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6202 |
match(Set dst (AddVI dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6203 |
format %{ "paddd $dst,$src\t! add packed4I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6204 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6205 |
__ paddd($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6206 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6207 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6208 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6209 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6210 |
instruct vadd4I_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6211 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6212 |
match(Set dst (AddVI src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6213 |
format %{ "vpaddd $dst,$src1,$src2\t! add packed4I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6214 |
ins_encode %{ |
30624 | 6215 |
int vector_len = 0; |
6216 |
__ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6217 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6218 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6219 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6220 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6221 |
// Add packed 4 ints with a memory operand (AVX 3-operand form): dst = src + [mem].
// vector_len 0 selects the 128-bit (vecX) encoding.
instruct vadd4I_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVI src (LoadVector mem)));
  format %{ "vpaddd $dst,$src,$mem\t! add packed4I" %}
  ins_encode %{
    int vector_len = 0;
    __ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6231 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6232 |
// Add packed 8 ints, register form: dst = src1 + src2.
// 256-bit integer vpaddd needs AVX2, hence UseAVX > 1; vector_len 1 = 256-bit (vecY).
instruct vadd8I_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (AddVI src1 src2));
  format %{ "vpaddd $dst,$src1,$src2\t! add packed8I" %}
  ins_encode %{
    int vector_len = 1;
    __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6242 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6243 |
// Add packed 8 ints with a memory operand: dst = src + [mem] (AVX2, 256-bit).
instruct vadd8I_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (AddVI src (LoadVector mem)));
  format %{ "vpaddd $dst,$src,$mem\t! add packed8I" %}
  ins_encode %{
    int vector_len = 1;
    __ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
6253 |
||
6254 |
// Add packed 16 ints, register form (AVX-512, UseAVX > 2); vector_len 2 = 512-bit (vecZ).
instruct vadd16I_reg(vecZ dst, vecZ src1, vecZ src2) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (AddVI src1 src2));
  format %{ "vpaddd $dst,$src1,$src2\t! add packed16I" %}
  ins_encode %{
    int vector_len = 2;
    __ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
6264 |
||
6265 |
// Add packed 16 ints with a memory operand (AVX-512, 512-bit): dst = src + [mem].
instruct vadd16I_mem(vecZ dst, vecZ src, memory mem) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (AddVI src (LoadVector mem)));
  format %{ "vpaddd $dst,$src,$mem\t! add packed16I" %}
  ins_encode %{
    int vector_len = 2;
    __ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6275 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6276 |
// Longs vector add |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6277 |
// Longs vector add
// Add packed 2 longs, SSE2 two-operand form (UseAVX == 0): dst = dst + src.
// paddq reads and writes dst, which is why the match rule is (AddVL dst src).
instruct vadd2L(vecX dst, vecX src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVL dst src));
  format %{ "paddq $dst,$src\t! add packed2L" %}
  ins_encode %{
    __ paddq($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6286 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6287 |
// Add packed 2 longs, AVX 3-operand register form: dst = src1 + src2 (128-bit).
instruct vadd2L_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVL src1 src2));
  format %{ "vpaddq $dst,$src1,$src2\t! add packed2L" %}
  ins_encode %{
    int vector_len = 0;
    __ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6297 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6298 |
// Add packed 2 longs with a memory operand: dst = src + [mem] (AVX, 128-bit).
instruct vadd2L_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVL src (LoadVector mem)));
  format %{ "vpaddq $dst,$src,$mem\t! add packed2L" %}
  ins_encode %{
    int vector_len = 0;
    __ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6308 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6309 |
// Add packed 4 longs, register form; 256-bit integer vpaddq needs AVX2 (UseAVX > 1).
instruct vadd4L_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (AddVL src1 src2));
  format %{ "vpaddq $dst,$src1,$src2\t! add packed4L" %}
  ins_encode %{
    int vector_len = 1;
    __ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6319 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6320 |
// Add packed 4 longs with a memory operand: dst = src + [mem] (AVX2, 256-bit).
instruct vadd4L_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (AddVL src (LoadVector mem)));
  format %{ "vpaddq $dst,$src,$mem\t! add packed4L" %}
  ins_encode %{
    int vector_len = 1;
    __ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
6330 |
||
6331 |
// Add packed 8 longs, register form (AVX-512, 512-bit).
instruct vadd8L_reg(vecZ dst, vecZ src1, vecZ src2) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (AddVL src1 src2));
  format %{ "vpaddq $dst,$src1,$src2\t! add packed8L" %}
  ins_encode %{
    int vector_len = 2;
    __ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
6341 |
||
6342 |
// Add packed 8 longs with a memory operand (AVX-512, 512-bit): dst = src + [mem].
instruct vadd8L_mem(vecZ dst, vecZ src, memory mem) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (AddVL src (LoadVector mem)));
  format %{ "vpaddq $dst,$src,$mem\t! add packed8L" %}
  ins_encode %{
    int vector_len = 2;
    __ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6352 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6353 |
// Floats vector add |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6354 |
// Floats vector add
// Add packed 2 floats, SSE two-operand form (UseAVX == 0): dst = dst + src.
// Only the low 64 bits (vecD) carry the two lanes.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "addps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ addps($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6363 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6364 |
// Add packed 2 floats, AVX 3-operand register form: dst = src1 + src2.
instruct vadd2F_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "vaddps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    int vector_len = 0;
    __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6374 |
|
31410 | 6375 |
// Add packed 2 floats with a memory operand: dst = src + [mem] (AVX).
instruct vadd2F_mem(vecD dst, vecD src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVF src (LoadVector mem)));
  format %{ "vaddps $dst,$src,$mem\t! add packed2F" %}
  ins_encode %{
    int vector_len = 0;
    __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
6385 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6386 |
// Add packed 4 floats, SSE two-operand form (UseAVX == 0): dst = dst + src.
instruct vadd4F(vecX dst, vecX src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVF dst src));
  format %{ "addps $dst,$src\t! add packed4F" %}
  ins_encode %{
    __ addps($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6395 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6396 |
// Add packed 4 floats, AVX 3-operand register form: dst = src1 + src2 (128-bit).
instruct vadd4F_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVF src1 src2));
  format %{ "vaddps $dst,$src1,$src2\t! add packed4F" %}
  ins_encode %{
    int vector_len = 0;
    __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6406 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6407 |
// Add packed 4 floats with a memory operand: dst = src + [mem] (AVX, 128-bit).
instruct vadd4F_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVF src (LoadVector mem)));
  format %{ "vaddps $dst,$src,$mem\t! add packed4F" %}
  ins_encode %{
    int vector_len = 0;
    __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6417 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6418 |
// Add packed 8 floats, register form; 256-bit float vaddps only needs AVX1
// (UseAVX > 0), unlike the integer adds which need AVX2. vector_len 1 = 256-bit.
instruct vadd8F_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (AddVF src1 src2));
  format %{ "vaddps $dst,$src1,$src2\t! add packed8F" %}
  ins_encode %{
    int vector_len = 1;
    __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6428 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6429 |
// Add packed 8 floats with a memory operand: dst = src + [mem] (AVX1, 256-bit).
instruct vadd8F_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (AddVF src (LoadVector mem)));
  format %{ "vaddps $dst,$src,$mem\t! add packed8F" %}
  ins_encode %{
    int vector_len = 1;
    __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
6439 |
||
6440 |
// Add packed 16 floats, register form (AVX-512, 512-bit).
instruct vadd16F_reg(vecZ dst, vecZ src1, vecZ src2) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (AddVF src1 src2));
  format %{ "vaddps $dst,$src1,$src2\t! add packed16F" %}
  ins_encode %{
    int vector_len = 2;
    __ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
6450 |
||
6451 |
// Add packed 16 floats with a memory operand (AVX-512, 512-bit): dst = src + [mem].
instruct vadd16F_mem(vecZ dst, vecZ src, memory mem) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (AddVF src (LoadVector mem)));
  format %{ "vaddps $dst,$src,$mem\t! add packed16F" %}
  ins_encode %{
    int vector_len = 2;
    __ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6461 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6462 |
// Doubles vector add |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6463 |
// Doubles vector add
// Add packed 2 doubles, SSE2 two-operand form (UseAVX == 0): dst = dst + src.
instruct vadd2D(vecX dst, vecX src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVD dst src));
  format %{ "addpd $dst,$src\t! add packed2D" %}
  ins_encode %{
    __ addpd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6472 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6473 |
// Add packed 2 doubles, AVX 3-operand register form: dst = src1 + src2 (128-bit).
instruct vadd2D_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVD src1 src2));
  format %{ "vaddpd $dst,$src1,$src2\t! add packed2D" %}
  ins_encode %{
    int vector_len = 0;
    __ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6483 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6484 |
// Add packed 2 doubles with a memory operand: dst = src + [mem] (AVX, 128-bit).
instruct vadd2D_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AddVD src (LoadVector mem)));
  format %{ "vaddpd $dst,$src,$mem\t! add packed2D" %}
  ins_encode %{
    int vector_len = 0;
    __ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6494 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6495 |
// Add packed 4 doubles, register form; 256-bit vaddpd only needs AVX1 (UseAVX > 0).
instruct vadd4D_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVD src1 src2));
  format %{ "vaddpd $dst,$src1,$src2\t! add packed4D" %}
  ins_encode %{
    int vector_len = 1;
    __ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6505 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6506 |
// Add packed 4 doubles with a memory operand: dst = src + [mem] (AVX1, 256-bit).
instruct vadd4D_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVD src (LoadVector mem)));
  format %{ "vaddpd $dst,$src,$mem\t! add packed4D" %}
  ins_encode %{
    int vector_len = 1;
    __ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
6516 |
||
6517 |
// Add packed 8 doubles, register form (AVX-512, 512-bit).
instruct vadd8D_reg(vecZ dst, vecZ src1, vecZ src2) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (AddVD src1 src2));
  format %{ "vaddpd $dst,$src1,$src2\t! add packed8D" %}
  ins_encode %{
    int vector_len = 2;
    __ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
6527 |
||
6528 |
// Add packed 8D (eight doubles, 512-bit, EVEX), memory-source form:
// dst = src + [mem].
instruct vadd8D_mem(vecZ dst, vecZ src, memory mem) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (AddVD src (LoadVector mem)));
  format %{ "vaddpd $dst,$src,$mem\t! add packed8D" %}
  ins_encode %{
    int vlen = 2; // 512-bit vector encoding
    __ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6538 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6539 |
// --------------------------------- SUB -------------------------------------- |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6540 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6541 |
// Bytes vector sub |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6542 |
// Subtract packed 4B (four bytes), SSE-only form (UseAVX == 0).
// SSE psubb is destructive, so the matcher ties dst: dst = dst - src.
instruct vsub4B(vecS dst, vecS src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVB dst src));
  format %{ "psubb $dst,$src\t! sub packed4B" %}
  ins_encode %{
    __ psubb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6551 |
|
51857 | 6552 |
// Subtract packed 4B, AVX non-destructive three-operand form:
// dst = src1 - src2.
instruct vsub4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVB src1 src2));
  format %{ "vpsubb $dst,$src1,$src2\t! sub packed4B" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6562 |
|
51857 | 6563 |
// Subtract packed 4B, AVX memory-source form: dst = src - [mem].
instruct vsub4B_mem(vecS dst, vecS src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVB src (LoadVector mem)));
  format %{ "vpsubb $dst,$src,$mem\t! sub packed4B" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
6573 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6574 |
// Subtract packed 8B (eight bytes), SSE-only form (UseAVX == 0).
// SSE psubb is destructive, so the matcher ties dst: dst = dst - src.
instruct vsub8B(vecD dst, vecD src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 8);
  match(Set dst (SubVB dst src));
  format %{ "psubb $dst,$src\t! sub packed8B" %}
  ins_encode %{
    __ psubb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6583 |
|
51857 | 6584 |
// Subtract packed 8B, AVX non-destructive three-operand form:
// dst = src1 - src2.
instruct vsub8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (SubVB src1 src2));
  format %{ "vpsubb $dst,$src1,$src2\t! sub packed8B" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6594 |
|
51857 | 6595 |
// Subtract packed 8B, AVX memory-source form: dst = src - [mem].
instruct vsub8B_mem(vecD dst, vecD src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (SubVB src (LoadVector mem)));
  format %{ "vpsubb $dst,$src,$mem\t! sub packed8B" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
6605 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6606 |
// Subtract packed 16B (sixteen bytes, full XMM), SSE-only form (UseAVX == 0).
// SSE psubb is destructive, so the matcher ties dst: dst = dst - src.
instruct vsub16B(vecX dst, vecX src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 16);
  match(Set dst (SubVB dst src));
  format %{ "psubb $dst,$src\t! sub packed16B" %}
  ins_encode %{
    __ psubb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6615 |
|
51857 | 6616 |
// Subtract packed 16B, AVX non-destructive three-operand form:
// dst = src1 - src2.
instruct vsub16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
  match(Set dst (SubVB src1 src2));
  format %{ "vpsubb $dst,$src1,$src2\t! sub packed16B" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6626 |
|
51857 | 6627 |
// Subtract packed 16B, AVX memory-source form: dst = src - [mem].
instruct vsub16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
  match(Set dst (SubVB src (LoadVector mem)));
  format %{ "vpsubb $dst,$src,$mem\t! sub packed16B" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6637 |
|
51857 | 6638 |
// Subtract packed 32B (256-bit, needs AVX2 for integer ops):
// dst = src1 - src2.
instruct vsub32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (SubVB src1 src2));
  format %{ "vpsubb $dst,$src1,$src2\t! sub packed32B" %}
  ins_encode %{
    int vlen = 1; // 256-bit vector encoding
    __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6648 |
|
51857 | 6649 |
// Subtract packed 32B (256-bit, AVX2), memory-source form:
// dst = src - [mem].
instruct vsub32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (SubVB src (LoadVector mem)));
  format %{ "vpsubb $dst,$src,$mem\t! sub packed32B" %}
  ins_encode %{
    int vlen = 1; // 256-bit vector encoding
    __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
6659 |
||
6660 |
// Subtract packed 64B (512-bit): byte ops at 512 bits require AVX-512BW.
// dst = src1 - src2.
instruct vsub64B_reg(vecZ dst, vecZ src1, vecZ src2) %{
  predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 64);
  match(Set dst (SubVB src1 src2));
  format %{ "vpsubb $dst,$src1,$src2\t! sub packed64B" %}
  ins_encode %{
    int vlen = 2; // 512-bit vector encoding
    __ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
6670 |
||
6671 |
// Subtract packed 64B (512-bit, AVX-512BW), memory-source form:
// dst = src - [mem].
instruct vsub64B_mem(vecZ dst, vecZ src, memory mem) %{
  predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 64);
  match(Set dst (SubVB src (LoadVector mem)));
  format %{ "vpsubb $dst,$src,$mem\t! sub packed64B" %}
  ins_encode %{
    int vlen = 2; // 512-bit vector encoding
    __ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6681 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6682 |
// Shorts/Chars vector sub |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6683 |
// Subtract packed 2S (two shorts/chars), SSE-only form (UseAVX == 0).
// SSE psubw is destructive, so the matcher ties dst: dst = dst - src.
instruct vsub2S(vecS dst, vecS src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 2);
  match(Set dst (SubVS dst src));
  format %{ "psubw $dst,$src\t! sub packed2S" %}
  ins_encode %{
    __ psubw($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6692 |
|
51857 | 6693 |
// Subtract packed 2S, AVX non-destructive three-operand form:
// dst = src1 - src2.
instruct vsub2S_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (SubVS src1 src2));
  format %{ "vpsubw $dst,$src1,$src2\t! sub packed2S" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6703 |
|
51857 | 6704 |
// Subtract packed 2S, AVX memory-source form: dst = src - [mem].
instruct vsub2S_mem(vecS dst, vecS src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (SubVS src (LoadVector mem)));
  format %{ "vpsubw $dst,$src,$mem\t! sub packed2S" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
6714 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6715 |
// Subtract packed 4S (four shorts/chars), SSE-only form (UseAVX == 0).
// SSE psubw is destructive, so the matcher ties dst: dst = dst - src.
instruct vsub4S(vecD dst, vecD src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVS dst src));
  format %{ "psubw $dst,$src\t! sub packed4S" %}
  ins_encode %{
    __ psubw($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6724 |
|
51857 | 6725 |
// Subtract packed 4S, AVX non-destructive three-operand form:
// dst = src1 - src2.
instruct vsub4S_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVS src1 src2));
  format %{ "vpsubw $dst,$src1,$src2\t! sub packed4S" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6735 |
|
51857 | 6736 |
// Subtract packed 4S, AVX memory-source form: dst = src - [mem].
instruct vsub4S_mem(vecD dst, vecD src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SubVS src (LoadVector mem)));
  format %{ "vpsubw $dst,$src,$mem\t! sub packed4S" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
6746 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6747 |
// Subtract packed 8S (eight shorts/chars, full XMM), SSE-only form (UseAVX == 0).
// SSE psubw is destructive, so the matcher ties dst: dst = dst - src.
instruct vsub8S(vecX dst, vecX src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 8);
  match(Set dst (SubVS dst src));
  format %{ "psubw $dst,$src\t! sub packed8S" %}
  ins_encode %{
    __ psubw($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6756 |
|
51857 | 6757 |
// Subtract packed 8S, AVX non-destructive three-operand form:
// dst = src1 - src2.
instruct vsub8S_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (SubVS src1 src2));
  format %{ "vpsubw $dst,$src1,$src2\t! sub packed8S" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6767 |
|
51857 | 6768 |
// Subtract packed 8S, AVX memory-source form: dst = src - [mem].
instruct vsub8S_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (SubVS src (LoadVector mem)));
  format %{ "vpsubw $dst,$src,$mem\t! sub packed8S" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6778 |
|
51857 | 6779 |
// Subtract packed 16S (256-bit, needs AVX2 for integer ops):
// dst = src1 - src2.
instruct vsub16S_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (SubVS src1 src2));
  format %{ "vpsubw $dst,$src1,$src2\t! sub packed16S" %}
  ins_encode %{
    int vlen = 1; // 256-bit vector encoding
    __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6789 |
|
51857 | 6790 |
// Subtract packed 16S (256-bit, AVX2), memory-source form:
// dst = src - [mem].
instruct vsub16S_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (SubVS src (LoadVector mem)));
  format %{ "vpsubw $dst,$src,$mem\t! sub packed16S" %}
  ins_encode %{
    int vlen = 1; // 256-bit vector encoding
    __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
6800 |
||
6801 |
// Subtract packed 32S (512-bit): word ops at 512 bits require AVX-512BW.
// dst = src1 - src2.
instruct vsub32S_reg(vecZ dst, vecZ src1, vecZ src2) %{
  predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 32);
  match(Set dst (SubVS src1 src2));
  format %{ "vpsubw $dst,$src1,$src2\t! sub packed32S" %}
  ins_encode %{
    int vlen = 2; // 512-bit vector encoding
    __ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
6811 |
||
6812 |
// Subtract packed 32S (512-bit, AVX-512BW), memory-source form:
// dst = src - [mem].
instruct vsub32S_mem(vecZ dst, vecZ src, memory mem) %{
  predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 32);
  match(Set dst (SubVS src (LoadVector mem)));
  format %{ "vpsubw $dst,$src,$mem\t! sub packed32S" %}
  ins_encode %{
    int vlen = 2; // 512-bit vector encoding
    __ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6822 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6823 |
// Integers vector sub |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6824 |
// Subtract packed 2I (two ints), SSE-only form (UseAVX == 0).
// SSE psubd is destructive, so the matcher ties dst: dst = dst - src.
instruct vsub2I(vecD dst, vecD src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length() == 2);
  match(Set dst (SubVI dst src));
  format %{ "psubd $dst,$src\t! sub packed2I" %}
  ins_encode %{
    __ psubd($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6833 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6834 |
// Subtract packed 2I, AVX non-destructive three-operand form:
// dst = src1 - src2.
instruct vsub2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (SubVI src1 src2));
  format %{ "vpsubd $dst,$src1,$src2\t! sub packed2I" %}
  ins_encode %{
    int vlen = 0; // 128-bit vector encoding
    __ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6844 |
|
31410 | 6845 |
instruct vsub2I_mem(vecD dst, vecD src, memory mem) %{ |
6846 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
|
6847 |
match(Set dst (SubVI src (LoadVector mem))); |
|
6848 |
format %{ "vpsubd $dst,$src,$mem\t! sub packed2I" %} |
|
6849 |
ins_encode %{ |
|
6850 |
int vector_len = 0; |
|
6851 |
__ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
6852 |
%} |
|
6853 |
ins_pipe( pipe_slow ); |
|
6854 |
%} |
|
6855 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6856 |
instruct vsub4I(vecX dst, vecX src) %{ |
51857 | 6857 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 4); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6858 |
match(Set dst (SubVI dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6859 |
format %{ "psubd $dst,$src\t! sub packed4I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6860 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6861 |
__ psubd($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6862 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6863 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6864 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6865 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6866 |
instruct vsub4I_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6867 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6868 |
match(Set dst (SubVI src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6869 |
format %{ "vpsubd $dst,$src1,$src2\t! sub packed4I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6870 |
ins_encode %{ |
30624 | 6871 |
int vector_len = 0; |
6872 |
__ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6873 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6874 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6875 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6876 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6877 |
instruct vsub4I_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6878 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6879 |
match(Set dst (SubVI src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6880 |
format %{ "vpsubd $dst,$src,$mem\t! sub packed4I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6881 |
ins_encode %{ |
30624 | 6882 |
int vector_len = 0; |
6883 |
__ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6884 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6885 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6886 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6887 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6888 |
instruct vsub8I_reg(vecY dst, vecY src1, vecY src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6889 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6890 |
match(Set dst (SubVI src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6891 |
format %{ "vpsubd $dst,$src1,$src2\t! sub packed8I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6892 |
ins_encode %{ |
30624 | 6893 |
int vector_len = 1; |
6894 |
__ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6895 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6896 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6897 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6898 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6899 |
instruct vsub8I_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6900 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6901 |
match(Set dst (SubVI src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6902 |
format %{ "vpsubd $dst,$src,$mem\t! sub packed8I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6903 |
ins_encode %{ |
30624 | 6904 |
int vector_len = 1; |
6905 |
__ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
6906 |
%} |
|
6907 |
ins_pipe( pipe_slow ); |
|
6908 |
%} |
|
6909 |
||
6910 |
instruct vsub16I_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
6911 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 16); |
|
6912 |
match(Set dst (SubVI src1 src2)); |
|
6913 |
format %{ "vpsubd $dst,$src1,$src2\t! sub packed16I" %} |
|
6914 |
ins_encode %{ |
|
6915 |
int vector_len = 2; |
|
6916 |
__ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
6917 |
%} |
|
6918 |
ins_pipe( pipe_slow ); |
|
6919 |
%} |
|
6920 |
||
6921 |
instruct vsub16I_mem(vecZ dst, vecZ src, memory mem) %{ |
|
6922 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 16); |
|
6923 |
match(Set dst (SubVI src (LoadVector mem))); |
|
6924 |
format %{ "vpsubd $dst,$src,$mem\t! sub packed16I" %} |
|
6925 |
ins_encode %{ |
|
6926 |
int vector_len = 2; |
|
6927 |
__ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6928 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6929 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6930 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6931 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6932 |
// Longs vector sub |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6933 |
instruct vsub2L(vecX dst, vecX src) %{ |
51857 | 6934 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 2); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6935 |
match(Set dst (SubVL dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6936 |
format %{ "psubq $dst,$src\t! sub packed2L" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6937 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6938 |
__ psubq($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6939 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6940 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6941 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6942 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6943 |
instruct vsub2L_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6944 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6945 |
match(Set dst (SubVL src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6946 |
format %{ "vpsubq $dst,$src1,$src2\t! sub packed2L" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6947 |
ins_encode %{ |
30624 | 6948 |
int vector_len = 0; |
6949 |
__ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6950 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6951 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6952 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6953 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6954 |
instruct vsub2L_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6955 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6956 |
match(Set dst (SubVL src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6957 |
format %{ "vpsubq $dst,$src,$mem\t! sub packed2L" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6958 |
ins_encode %{ |
30624 | 6959 |
int vector_len = 0; |
6960 |
__ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6961 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6962 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6963 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6964 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6965 |
instruct vsub4L_reg(vecY dst, vecY src1, vecY src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6966 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6967 |
match(Set dst (SubVL src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6968 |
format %{ "vpsubq $dst,$src1,$src2\t! sub packed4L" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6969 |
ins_encode %{ |
30624 | 6970 |
int vector_len = 1; |
6971 |
__ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6972 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6973 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6974 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6975 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6976 |
instruct vsub4L_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6977 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6978 |
match(Set dst (SubVL src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6979 |
format %{ "vpsubq $dst,$src,$mem\t! sub packed4L" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
6980 |
ins_encode %{ |
30624 | 6981 |
int vector_len = 1; |
6982 |
__ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
6983 |
%} |
|
6984 |
ins_pipe( pipe_slow ); |
|
6985 |
%} |
|
6986 |
||
6987 |
instruct vsub8L_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
6988 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 8); |
|
6989 |
match(Set dst (SubVL src1 src2)); |
|
6990 |
format %{ "vpsubq $dst,$src1,$src2\t! sub packed8L" %} |
|
6991 |
ins_encode %{ |
|
6992 |
int vector_len = 2; |
|
6993 |
__ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
6994 |
%} |
|
6995 |
ins_pipe( pipe_slow ); |
|
6996 |
%} |
|
6997 |
||
6998 |
instruct vsub8L_mem(vecZ dst, vecZ src, memory mem) %{ |
|
6999 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 8); |
|
7000 |
match(Set dst (SubVL src (LoadVector mem))); |
|
7001 |
format %{ "vpsubq $dst,$src,$mem\t! sub packed8L" %} |
|
7002 |
ins_encode %{ |
|
7003 |
int vector_len = 2; |
|
7004 |
__ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7005 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7006 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7007 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7008 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7009 |
// Floats vector sub |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7010 |
instruct vsub2F(vecD dst, vecD src) %{ |
51857 | 7011 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 2); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7012 |
match(Set dst (SubVF dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7013 |
format %{ "subps $dst,$src\t! sub packed2F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7014 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7015 |
__ subps($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7016 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7017 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7018 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7019 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7020 |
instruct vsub2F_reg(vecD dst, vecD src1, vecD src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7021 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7022 |
match(Set dst (SubVF src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7023 |
format %{ "vsubps $dst,$src1,$src2\t! sub packed2F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7024 |
ins_encode %{ |
30624 | 7025 |
int vector_len = 0; |
7026 |
__ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7027 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7028 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7029 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7030 |
|
31410 | 7031 |
instruct vsub2F_mem(vecD dst, vecD src, memory mem) %{ |
7032 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
|
7033 |
match(Set dst (SubVF src (LoadVector mem))); |
|
7034 |
format %{ "vsubps $dst,$src,$mem\t! sub packed2F" %} |
|
7035 |
ins_encode %{ |
|
7036 |
int vector_len = 0; |
|
7037 |
__ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7038 |
%} |
|
7039 |
ins_pipe( pipe_slow ); |
|
7040 |
%} |
|
7041 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7042 |
instruct vsub4F(vecX dst, vecX src) %{ |
51857 | 7043 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 4); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7044 |
match(Set dst (SubVF dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7045 |
format %{ "subps $dst,$src\t! sub packed4F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7046 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7047 |
__ subps($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7048 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7049 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7050 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7051 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7052 |
instruct vsub4F_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7053 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7054 |
match(Set dst (SubVF src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7055 |
format %{ "vsubps $dst,$src1,$src2\t! sub packed4F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7056 |
ins_encode %{ |
30624 | 7057 |
int vector_len = 0; |
7058 |
__ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7059 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7060 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7061 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7062 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7063 |
instruct vsub4F_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7064 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7065 |
match(Set dst (SubVF src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7066 |
format %{ "vsubps $dst,$src,$mem\t! sub packed4F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7067 |
ins_encode %{ |
30624 | 7068 |
int vector_len = 0; |
7069 |
__ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7070 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7071 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7072 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7073 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7074 |
instruct vsub8F_reg(vecY dst, vecY src1, vecY src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7075 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7076 |
match(Set dst (SubVF src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7077 |
format %{ "vsubps $dst,$src1,$src2\t! sub packed8F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7078 |
ins_encode %{ |
30624 | 7079 |
int vector_len = 1; |
7080 |
__ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7081 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7082 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7083 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7084 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7085 |
instruct vsub8F_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7086 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7087 |
match(Set dst (SubVF src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7088 |
format %{ "vsubps $dst,$src,$mem\t! sub packed8F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7089 |
ins_encode %{ |
30624 | 7090 |
int vector_len = 1; |
7091 |
__ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7092 |
%} |
|
7093 |
ins_pipe( pipe_slow ); |
|
7094 |
%} |
|
7095 |
||
7096 |
instruct vsub16F_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
7097 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 16); |
|
7098 |
match(Set dst (SubVF src1 src2)); |
|
7099 |
format %{ "vsubps $dst,$src1,$src2\t! sub packed16F" %} |
|
7100 |
ins_encode %{ |
|
7101 |
int vector_len = 2; |
|
7102 |
__ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7103 |
%} |
|
7104 |
ins_pipe( pipe_slow ); |
|
7105 |
%} |
|
7106 |
||
7107 |
instruct vsub16F_mem(vecZ dst, vecZ src, memory mem) %{ |
|
7108 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 16); |
|
7109 |
match(Set dst (SubVF src (LoadVector mem))); |
|
7110 |
format %{ "vsubps $dst,$src,$mem\t! sub packed16F" %} |
|
7111 |
ins_encode %{ |
|
7112 |
int vector_len = 2; |
|
7113 |
__ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7114 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7115 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7116 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7117 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7118 |
// Doubles vector sub |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7119 |
instruct vsub2D(vecX dst, vecX src) %{ |
51857 | 7120 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 2); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7121 |
match(Set dst (SubVD dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7122 |
format %{ "subpd $dst,$src\t! sub packed2D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7123 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7124 |
__ subpd($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7125 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7126 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7127 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7128 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7129 |
instruct vsub2D_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7130 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7131 |
match(Set dst (SubVD src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7132 |
format %{ "vsubpd $dst,$src1,$src2\t! sub packed2D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7133 |
ins_encode %{ |
30624 | 7134 |
int vector_len = 0; |
7135 |
__ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7136 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7137 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7138 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7139 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7140 |
instruct vsub2D_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7141 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7142 |
match(Set dst (SubVD src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7143 |
format %{ "vsubpd $dst,$src,$mem\t! sub packed2D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7144 |
ins_encode %{ |
30624 | 7145 |
int vector_len = 0; |
7146 |
__ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7147 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7148 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7149 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7150 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7151 |
instruct vsub4D_reg(vecY dst, vecY src1, vecY src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7152 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7153 |
match(Set dst (SubVD src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7154 |
format %{ "vsubpd $dst,$src1,$src2\t! sub packed4D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7155 |
ins_encode %{ |
30624 | 7156 |
int vector_len = 1; |
7157 |
__ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7158 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7159 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7160 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7161 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7162 |
instruct vsub4D_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7163 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7164 |
match(Set dst (SubVD src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7165 |
format %{ "vsubpd $dst,$src,$mem\t! sub packed4D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7166 |
ins_encode %{ |
30624 | 7167 |
int vector_len = 1; |
7168 |
__ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7169 |
%} |
|
7170 |
ins_pipe( pipe_slow ); |
|
7171 |
%} |
|
7172 |
||
7173 |
instruct vsub8D_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
7174 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 8); |
|
7175 |
match(Set dst (SubVD src1 src2)); |
|
7176 |
format %{ "vsubpd $dst,$src1,$src2\t! sub packed8D" %} |
|
7177 |
ins_encode %{ |
|
7178 |
int vector_len = 2; |
|
7179 |
__ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7180 |
%} |
|
7181 |
ins_pipe( pipe_slow ); |
|
7182 |
%} |
|
7183 |
||
7184 |
instruct vsub8D_mem(vecZ dst, vecZ src, memory mem) %{ |
|
7185 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 8); |
|
7186 |
match(Set dst (SubVD src (LoadVector mem))); |
|
7187 |
format %{ "vsubpd $dst,$src,$mem\t! sub packed8D" %} |
|
7188 |
ins_encode %{ |
|
7189 |
int vector_len = 2; |
|
7190 |
__ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7191 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7192 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7193 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7194 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7195 |
// --------------------------------- MUL -------------------------------------- |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7196 |
|
54750 | 7197 |
// Byte vector mul |
7198 |
instruct mul4B_reg(vecS dst, vecS src1, vecS src2, vecS tmp, rRegI scratch) %{ |
|
7199 |
predicate(UseSSE > 3 && n->as_Vector()->length() == 4); |
|
7200 |
match(Set dst (MulVB src1 src2)); |
|
7201 |
effect(TEMP dst, TEMP tmp, TEMP scratch); |
|
7202 |
format %{"pmovsxbw $tmp,$src1\n\t" |
|
7203 |
"pmovsxbw $dst,$src2\n\t" |
|
7204 |
"pmullw $tmp,$dst\n\t" |
|
7205 |
"movdqu $dst,[0x00ff00ff0x00ff00ff]\n\t" |
|
7206 |
"pand $dst,$tmp\n\t" |
|
7207 |
"packuswb $dst,$dst\t! mul packed4B" %} |
|
7208 |
ins_encode %{ |
|
7209 |
__ pmovsxbw($tmp$$XMMRegister, $src1$$XMMRegister); |
|
7210 |
__ pmovsxbw($dst$$XMMRegister, $src2$$XMMRegister); |
|
7211 |
__ pmullw($tmp$$XMMRegister, $dst$$XMMRegister); |
|
7212 |
__ movdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register); |
|
7213 |
__ pand($dst$$XMMRegister, $tmp$$XMMRegister); |
|
7214 |
__ packuswb($dst$$XMMRegister, $dst$$XMMRegister); |
|
7215 |
%} |
|
7216 |
ins_pipe( pipe_slow ); |
|
7217 |
%} |
|
7218 |
||
7219 |
instruct mul8B_reg(vecD dst, vecD src1, vecD src2, vecD tmp, rRegI scratch) %{ |
|
7220 |
predicate(UseSSE > 3 && n->as_Vector()->length() == 8); |
|
7221 |
match(Set dst (MulVB src1 src2)); |
|
7222 |
effect(TEMP dst, TEMP tmp, TEMP scratch); |
|
7223 |
format %{"pmovsxbw $tmp,$src1\n\t" |
|
7224 |
"pmovsxbw $dst,$src2\n\t" |
|
7225 |
"pmullw $tmp,$dst\n\t" |
|
7226 |
"movdqu $dst,[0x00ff00ff0x00ff00ff]\n\t" |
|
7227 |
"pand $dst,$tmp\n\t" |
|
7228 |
"packuswb $dst,$dst\t! mul packed8B" %} |
|
7229 |
ins_encode %{ |
|
7230 |
__ pmovsxbw($tmp$$XMMRegister, $src1$$XMMRegister); |
|
7231 |
__ pmovsxbw($dst$$XMMRegister, $src2$$XMMRegister); |
|
7232 |
__ pmullw($tmp$$XMMRegister, $dst$$XMMRegister); |
|
7233 |
__ movdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register); |
|
7234 |
__ pand($dst$$XMMRegister, $tmp$$XMMRegister); |
|
7235 |
__ packuswb($dst$$XMMRegister, $dst$$XMMRegister); |
|
7236 |
%} |
|
7237 |
ins_pipe( pipe_slow ); |
|
7238 |
%} |
|
7239 |
||
7240 |
instruct mul16B_reg(vecX dst, vecX src1, vecX src2, vecX tmp1, vecX tmp2, rRegI scratch) %{ |
|
7241 |
predicate(UseSSE > 3 && n->as_Vector()->length() == 16); |
|
7242 |
match(Set dst (MulVB src1 src2)); |
|
7243 |
effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP scratch); |
|
7244 |
format %{"pmovsxbw $tmp1,$src1\n\t" |
|
7245 |
"pmovsxbw $tmp2,$src2\n\t" |
|
7246 |
"pmullw $tmp1,$tmp2\n\t" |
|
7247 |
"pshufd $tmp2,$src1,0xEE\n\t" |
|
7248 |
"pshufd $dst,$src2,0xEE\n\t" |
|
7249 |
"pmovsxbw $tmp2,$tmp2\n\t" |
|
7250 |
"pmovsxbw $dst,$dst\n\t" |
|
7251 |
"pmullw $tmp2,$dst\n\t" |
|
7252 |
"movdqu $dst,[0x00ff00ff0x00ff00ff]\n\t" |
|
7253 |
"pand $tmp2,$dst\n\t" |
|
7254 |
"pand $dst,$tmp1\n\t" |
|
7255 |
"packuswb $dst,$tmp2\t! mul packed16B" %} |
|
7256 |
ins_encode %{ |
|
7257 |
__ pmovsxbw($tmp1$$XMMRegister, $src1$$XMMRegister); |
|
7258 |
__ pmovsxbw($tmp2$$XMMRegister, $src2$$XMMRegister); |
|
7259 |
__ pmullw($tmp1$$XMMRegister, $tmp2$$XMMRegister); |
|
7260 |
__ pshufd($tmp2$$XMMRegister, $src1$$XMMRegister, 0xEE); |
|
7261 |
__ pshufd($dst$$XMMRegister, $src2$$XMMRegister, 0xEE); |
|
7262 |
__ pmovsxbw($tmp2$$XMMRegister, $tmp2$$XMMRegister); |
|
7263 |
__ pmovsxbw($dst$$XMMRegister, $dst$$XMMRegister); |
|
7264 |
__ pmullw($tmp2$$XMMRegister, $dst$$XMMRegister); |
|
7265 |
__ movdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register); |
|
7266 |
__ pand($tmp2$$XMMRegister, $dst$$XMMRegister); |
|
7267 |
__ pand($dst$$XMMRegister, $tmp1$$XMMRegister); |
|
7268 |
__ packuswb($dst$$XMMRegister, $tmp2$$XMMRegister); |
|
7269 |
%} |
|
7270 |
ins_pipe( pipe_slow ); |
|
7271 |
%} |
|
7272 |
||
7273 |
instruct vmul16B_reg_avx(vecX dst, vecX src1, vecX src2, vecX tmp, rRegI scratch) %{ |
|
7274 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
|
7275 |
match(Set dst (MulVB src1 src2)); |
|
7276 |
effect(TEMP dst, TEMP tmp, TEMP scratch); |
|
7277 |
format %{"vpmovsxbw $tmp,$src1\n\t" |
|
7278 |
"vpmovsxbw $dst,$src2\n\t" |
|
7279 |
"vpmullw $tmp,$tmp,$dst\n\t" |
|
7280 |
"vmovdqu $dst,[0x00ff00ff0x00ff00ff]\n\t" |
|
7281 |
"vpand $dst,$dst,$tmp\n\t" |
|
7282 |
"vextracti128_high $tmp,$dst\n\t" |
|
7283 |
"vpackuswb $dst,$dst,$dst\n\t! mul packed16B" %} |
|
7284 |
ins_encode %{ |
|
7285 |
int vector_len = 1; |
|
7286 |
__ vpmovsxbw($tmp$$XMMRegister, $src1$$XMMRegister, vector_len); |
|
7287 |
__ vpmovsxbw($dst$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7288 |
__ vpmullw($tmp$$XMMRegister, $tmp$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7289 |
__ vmovdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register); |
|
7290 |
__ vpand($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister, vector_len); |
|
7291 |
__ vextracti128_high($tmp$$XMMRegister, $dst$$XMMRegister); |
|
7292 |
__ vpackuswb($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister, 0); |
|
7293 |
%} |
|
7294 |
ins_pipe( pipe_slow ); |
|
7295 |
%} |
|
7296 |
||
7297 |
instruct vmul32B_reg_avx(vecY dst, vecY src1, vecY src2, vecY tmp1, vecY tmp2, rRegI scratch) %{ |
|
7298 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 32); |
|
7299 |
match(Set dst (MulVB src1 src2)); |
|
7300 |
effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP scratch); |
|
7301 |
format %{"vextracti128_high $tmp1,$src1\n\t" |
|
7302 |
"vextracti128_high $dst,$src2\n\t" |
|
7303 |
"vpmovsxbw $tmp1,$tmp1\n\t" |
|
7304 |
"vpmovsxbw $dst,$dst\n\t" |
|
7305 |
"vpmullw $tmp1,$tmp1,$dst\n\t" |
|
7306 |
"vpmovsxbw $tmp2,$src1\n\t" |
|
7307 |
"vpmovsxbw $dst,$src2\n\t" |
|
7308 |
"vpmullw $tmp2,$tmp2,$dst\n\t" |
|
7309 |
"vmovdqu $dst, [0x00ff00ff0x00ff00ff]\n\t" |
|
7310 |
"vpbroadcastd $dst, $dst\n\t" |
|
7311 |
"vpand $tmp1,$tmp1,$dst\n\t" |
|
7312 |
"vpand $dst,$dst,$tmp2\n\t" |
|
7313 |
"vpackuswb $dst,$dst,$tmp1\n\t" |
|
7314 |
"vpermq $dst, $dst, 0xD8\t! mul packed32B" %} |
|
7315 |
ins_encode %{ |
|
7316 |
int vector_len = 1; |
|
7317 |
__ vextracti128_high($tmp1$$XMMRegister, $src1$$XMMRegister); |
|
7318 |
__ vextracti128_high($dst$$XMMRegister, $src2$$XMMRegister); |
|
7319 |
__ vpmovsxbw($tmp1$$XMMRegister, $tmp1$$XMMRegister, vector_len); |
|
7320 |
__ vpmovsxbw($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7321 |
__ vpmullw($tmp1$$XMMRegister, $tmp1$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7322 |
__ vpmovsxbw($tmp2$$XMMRegister, $src1$$XMMRegister, vector_len); |
|
7323 |
__ vpmovsxbw($dst$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7324 |
__ vpmullw($tmp2$$XMMRegister, $tmp2$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7325 |
__ vmovdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register); |
|
7326 |
__ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7327 |
__ vpand($tmp1$$XMMRegister, $tmp1$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7328 |
__ vpand($dst$$XMMRegister, $dst$$XMMRegister, $tmp2$$XMMRegister, vector_len); |
|
7329 |
__ vpackuswb($dst$$XMMRegister, $dst$$XMMRegister, $tmp1$$XMMRegister, vector_len); |
|
7330 |
__ vpermq($dst$$XMMRegister, $dst$$XMMRegister, 0xD8, vector_len); |
|
7331 |
%} |
|
7332 |
ins_pipe( pipe_slow ); |
|
7333 |
%} |
|
7334 |
||
7335 |
instruct vmul64B_reg_avx(vecZ dst, vecZ src1, vecZ src2, vecZ tmp1, vecZ tmp2, rRegI scratch) %{ |
|
7336 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 64); |
|
7337 |
match(Set dst (MulVB src1 src2)); |
|
7338 |
effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP scratch); |
|
7339 |
format %{"vextracti64x4_high $tmp1,$src1\n\t" |
|
7340 |
"vextracti64x4_high $dst,$src2\n\t" |
|
7341 |
"vpmovsxbw $tmp1,$tmp1\n\t" |
|
7342 |
"vpmovsxbw $dst,$dst\n\t" |
|
7343 |
"vpmullw $tmp1,$tmp1,$dst\n\t" |
|
7344 |
"vpmovsxbw $tmp2,$src1\n\t" |
|
7345 |
"vpmovsxbw $dst,$src2\n\t" |
|
7346 |
"vpmullw $tmp2,$tmp2,$dst\n\t" |
|
7347 |
"vmovdqu $dst, [0x00ff00ff0x00ff00ff]\n\t" |
|
7348 |
"vpbroadcastd $dst, $dst\n\t" |
|
7349 |
"vpand $tmp1,$tmp1,$dst\n\t" |
|
7350 |
"vpand $tmp2,$tmp2,$dst\n\t" |
|
7351 |
"vpackuswb $dst,$tmp1,$tmp2\n\t" |
|
7352 |
"evmovdquq $tmp2,[0x0604020007050301]\n\t" |
|
7353 |
"vpermq $dst,$tmp2,$dst,0x01\t! mul packed64B" %} |
|
7354 |
||
7355 |
ins_encode %{ |
|
7356 |
int vector_len = 2; |
|
7357 |
__ vextracti64x4_high($tmp1$$XMMRegister, $src1$$XMMRegister); |
|
7358 |
__ vextracti64x4_high($dst$$XMMRegister, $src2$$XMMRegister); |
|
7359 |
__ vpmovsxbw($tmp1$$XMMRegister, $tmp1$$XMMRegister, vector_len); |
|
7360 |
__ vpmovsxbw($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7361 |
__ vpmullw($tmp1$$XMMRegister, $tmp1$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7362 |
__ vpmovsxbw($tmp2$$XMMRegister, $src1$$XMMRegister, vector_len); |
|
7363 |
__ vpmovsxbw($dst$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7364 |
__ vpmullw($tmp2$$XMMRegister, $tmp2$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7365 |
__ vmovdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register); |
|
7366 |
__ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7367 |
__ vpand($tmp1$$XMMRegister, $tmp1$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7368 |
__ vpand($tmp2$$XMMRegister, $tmp2$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7369 |
__ vpackuswb($dst$$XMMRegister, $tmp1$$XMMRegister, $tmp2$$XMMRegister, vector_len); |
|
7370 |
__ evmovdquq($tmp2$$XMMRegister, ExternalAddress(vector_byte_perm_mask()), vector_len, $scratch$$Register); |
|
7371 |
__ vpermq($dst$$XMMRegister, $tmp2$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7372 |
||
7373 |
%} |
|
7374 |
ins_pipe( pipe_slow ); |
|
7375 |
%} |
|
7376 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7377 |
// Shorts/Chars vector mul |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7378 |
instruct vmul2S(vecS dst, vecS src) %{ |
34162 | 7379 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 2); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7380 |
match(Set dst (MulVS dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7381 |
format %{ "pmullw $dst,$src\t! mul packed2S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7382 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7383 |
__ pmullw($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7384 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7385 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7386 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7387 |
|
51857 | 7388 |
instruct vmul2S_reg(vecS dst, vecS src1, vecS src2) %{ |
7389 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7390 |
match(Set dst (MulVS src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7391 |
format %{ "vpmullw $dst,$src1,$src2\t! mul packed2S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7392 |
ins_encode %{ |
30624 | 7393 |
int vector_len = 0; |
7394 |
__ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7395 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7396 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7397 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7398 |
|
51857 | 7399 |
instruct vmul2S_mem(vecS dst, vecS src, memory mem) %{ |
7400 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
|
31410 | 7401 |
match(Set dst (MulVS src (LoadVector mem))); |
7402 |
format %{ "vpmullw $dst,$src,$mem\t! mul packed2S" %} |
|
7403 |
ins_encode %{ |
|
7404 |
int vector_len = 0; |
|
7405 |
__ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7406 |
%} |
|
7407 |
ins_pipe( pipe_slow ); |
|
7408 |
%} |
|
7409 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7410 |
instruct vmul4S(vecD dst, vecD src) %{ |
34162 | 7411 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 4); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7412 |
match(Set dst (MulVS dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7413 |
format %{ "pmullw $dst,$src\t! mul packed4S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7414 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7415 |
__ pmullw($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7416 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7417 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7418 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7419 |
|
51857 | 7420 |
instruct vmul4S_reg(vecD dst, vecD src1, vecD src2) %{ |
7421 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7422 |
match(Set dst (MulVS src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7423 |
format %{ "vpmullw $dst,$src1,$src2\t! mul packed4S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7424 |
ins_encode %{ |
30624 | 7425 |
int vector_len = 0; |
7426 |
__ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7427 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7428 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7429 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7430 |
|
51857 | 7431 |
instruct vmul4S_mem(vecD dst, vecD src, memory mem) %{ |
7432 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
|
31410 | 7433 |
match(Set dst (MulVS src (LoadVector mem))); |
7434 |
format %{ "vpmullw $dst,$src,$mem\t! mul packed4S" %} |
|
7435 |
ins_encode %{ |
|
7436 |
int vector_len = 0; |
|
7437 |
__ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7438 |
%} |
|
7439 |
ins_pipe( pipe_slow ); |
|
7440 |
%} |
|
7441 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7442 |
instruct vmul8S(vecX dst, vecX src) %{ |
34162 | 7443 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 8); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7444 |
match(Set dst (MulVS dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7445 |
format %{ "pmullw $dst,$src\t! mul packed8S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7446 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7447 |
__ pmullw($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7448 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7449 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7450 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7451 |
|
51857 | 7452 |
instruct vmul8S_reg(vecX dst, vecX src1, vecX src2) %{ |
7453 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7454 |
match(Set dst (MulVS src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7455 |
format %{ "vpmullw $dst,$src1,$src2\t! mul packed8S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7456 |
ins_encode %{ |
30624 | 7457 |
int vector_len = 0; |
7458 |
__ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7459 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7460 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7461 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7462 |
|
51857 | 7463 |
instruct vmul8S_mem(vecX dst, vecX src, memory mem) %{ |
7464 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7465 |
match(Set dst (MulVS src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7466 |
format %{ "vpmullw $dst,$src,$mem\t! mul packed8S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7467 |
ins_encode %{ |
30624 | 7468 |
int vector_len = 0; |
7469 |
__ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7470 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7471 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7472 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7473 |
|
51857 | 7474 |
instruct vmul16S_reg(vecY dst, vecY src1, vecY src2) %{ |
7475 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7476 |
match(Set dst (MulVS src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7477 |
format %{ "vpmullw $dst,$src1,$src2\t! mul packed16S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7478 |
ins_encode %{ |
30624 | 7479 |
int vector_len = 1; |
7480 |
__ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7481 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7482 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7483 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7484 |
|
51857 | 7485 |
instruct vmul16S_mem(vecY dst, vecY src, memory mem) %{ |
7486 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7487 |
match(Set dst (MulVS src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7488 |
format %{ "vpmullw $dst,$src,$mem\t! mul packed16S" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7489 |
ins_encode %{ |
30624 | 7490 |
int vector_len = 1; |
7491 |
__ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7492 |
%} |
|
7493 |
ins_pipe( pipe_slow ); |
|
7494 |
%} |
|
7495 |
||
7496 |
instruct vmul32S_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
51857 | 7497 |
predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 32); |
30624 | 7498 |
match(Set dst (MulVS src1 src2)); |
7499 |
format %{ "vpmullw $dst,$src1,$src2\t! mul packed32S" %} |
|
7500 |
ins_encode %{ |
|
7501 |
int vector_len = 2; |
|
7502 |
__ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7503 |
%} |
|
7504 |
ins_pipe( pipe_slow ); |
|
7505 |
%} |
|
7506 |
||
7507 |
instruct vmul32S_mem(vecZ dst, vecZ src, memory mem) %{ |
|
51857 | 7508 |
predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 32); |
30624 | 7509 |
match(Set dst (MulVS src (LoadVector mem))); |
7510 |
format %{ "vpmullw $dst,$src,$mem\t! mul packed32S" %} |
|
7511 |
ins_encode %{ |
|
7512 |
int vector_len = 2; |
|
7513 |
__ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7514 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7515 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7516 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7517 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7518 |
// Integers vector mul (sse4_1) |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7519 |
instruct vmul2I(vecD dst, vecD src) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7520 |
predicate(UseSSE > 3 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7521 |
match(Set dst (MulVI dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7522 |
format %{ "pmulld $dst,$src\t! mul packed2I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7523 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7524 |
__ pmulld($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7525 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7526 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7527 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7528 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7529 |
instruct vmul2I_reg(vecD dst, vecD src1, vecD src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7530 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7531 |
match(Set dst (MulVI src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7532 |
format %{ "vpmulld $dst,$src1,$src2\t! mul packed2I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7533 |
ins_encode %{ |
30624 | 7534 |
int vector_len = 0; |
7535 |
__ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7536 |
%} |
|
7537 |
ins_pipe( pipe_slow ); |
|
7538 |
%} |
|
7539 |
||
31410 | 7540 |
instruct vmul2I_mem(vecD dst, vecD src, memory mem) %{ |
7541 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
|
7542 |
match(Set dst (MulVI src (LoadVector mem))); |
|
7543 |
format %{ "vpmulld $dst,$src,$mem\t! mul packed2I" %} |
|
7544 |
ins_encode %{ |
|
7545 |
int vector_len = 0; |
|
7546 |
__ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7547 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7548 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7549 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7550 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7551 |
instruct vmul4I(vecX dst, vecX src) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7552 |
predicate(UseSSE > 3 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7553 |
match(Set dst (MulVI dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7554 |
format %{ "pmulld $dst,$src\t! mul packed4I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7555 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7556 |
__ pmulld($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7557 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7558 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7559 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7560 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7561 |
instruct vmul4I_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7562 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7563 |
match(Set dst (MulVI src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7564 |
format %{ "vpmulld $dst,$src1,$src2\t! mul packed4I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7565 |
ins_encode %{ |
30624 | 7566 |
int vector_len = 0; |
7567 |
__ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7568 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7569 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7570 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7571 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7572 |
instruct vmul4I_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7573 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7574 |
match(Set dst (MulVI src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7575 |
format %{ "vpmulld $dst,$src,$mem\t! mul packed4I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7576 |
ins_encode %{ |
30624 | 7577 |
int vector_len = 0; |
7578 |
__ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7579 |
%} |
|
7580 |
ins_pipe( pipe_slow ); |
|
7581 |
%} |
|
7582 |
||
31410 | 7583 |
instruct vmul2L_reg(vecX dst, vecX src1, vecX src2) %{ |
7584 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 2 && VM_Version::supports_avx512dq()); |
|
7585 |
match(Set dst (MulVL src1 src2)); |
|
7586 |
format %{ "vpmullq $dst,$src1,$src2\t! mul packed2L" %} |
|
7587 |
ins_encode %{ |
|
7588 |
int vector_len = 0; |
|
7589 |
__ vpmullq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7590 |
%} |
|
7591 |
ins_pipe( pipe_slow ); |
|
7592 |
%} |
|
7593 |
||
7594 |
instruct vmul2L_mem(vecX dst, vecX src, memory mem) %{ |
|
7595 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 2 && VM_Version::supports_avx512dq()); |
|
7596 |
match(Set dst (MulVL src (LoadVector mem))); |
|
7597 |
format %{ "vpmullq $dst,$src,$mem\t! mul packed2L" %} |
|
7598 |
ins_encode %{ |
|
7599 |
int vector_len = 0; |
|
7600 |
__ vpmullq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7601 |
%} |
|
7602 |
ins_pipe( pipe_slow ); |
|
7603 |
%} |
|
7604 |
||
30624 | 7605 |
instruct vmul4L_reg(vecY dst, vecY src1, vecY src2) %{ |
7606 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 4 && VM_Version::supports_avx512dq()); |
|
7607 |
match(Set dst (MulVL src1 src2)); |
|
7608 |
format %{ "vpmullq $dst,$src1,$src2\t! mul packed4L" %} |
|
7609 |
ins_encode %{ |
|
7610 |
int vector_len = 1; |
|
7611 |
__ vpmullq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7612 |
%} |
|
7613 |
ins_pipe( pipe_slow ); |
|
7614 |
%} |
|
7615 |
||
7616 |
instruct vmul4L_mem(vecY dst, vecY src, memory mem) %{ |
|
7617 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 4 && VM_Version::supports_avx512dq()); |
|
7618 |
match(Set dst (MulVL src (LoadVector mem))); |
|
7619 |
format %{ "vpmullq $dst,$src,$mem\t! mul packed4L" %} |
|
7620 |
ins_encode %{ |
|
7621 |
int vector_len = 1; |
|
7622 |
__ vpmullq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7623 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7624 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7625 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7626 |
|
30624 | 7627 |
instruct vmul8L_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
7628 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 8 && VM_Version::supports_avx512dq()); |
|
7629 |
match(Set dst (MulVL src1 src2)); |
|
7630 |
format %{ "vpmullq $dst,$src1,$src2\t! mul packed8L" %} |
|
7631 |
ins_encode %{ |
|
7632 |
int vector_len = 2; |
|
7633 |
__ vpmullq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7634 |
%} |
|
7635 |
ins_pipe( pipe_slow ); |
|
7636 |
%} |
|
7637 |
||
31410 | 7638 |
instruct vmul8L_mem(vecZ dst, vecZ src, memory mem) %{ |
7639 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 8 && VM_Version::supports_avx512dq()); |
|
7640 |
match(Set dst (MulVL src (LoadVector mem))); |
|
7641 |
format %{ "vpmullq $dst,$src,$mem\t! mul packed8L" %} |
|
30624 | 7642 |
ins_encode %{ |
7643 |
int vector_len = 2; |
|
31410 | 7644 |
__ vpmullq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
7645 |
%} |
|
7646 |
ins_pipe( pipe_slow ); |
|
7647 |
%} |
|
7648 |
||
7649 |
instruct vmul8I_reg(vecY dst, vecY src1, vecY src2) %{ |
|
7650 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
|
7651 |
match(Set dst (MulVI src1 src2)); |
|
7652 |
format %{ "vpmulld $dst,$src1,$src2\t! mul packed8I" %} |
|
7653 |
ins_encode %{ |
|
7654 |
int vector_len = 1; |
|
30624 | 7655 |
__ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7656 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7657 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7658 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7659 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7660 |
instruct vmul8I_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7661 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7662 |
match(Set dst (MulVI src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7663 |
format %{ "vpmulld $dst,$src,$mem\t! mul packed8I" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7664 |
ins_encode %{ |
30624 | 7665 |
int vector_len = 1; |
7666 |
__ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7667 |
%} |
|
7668 |
ins_pipe( pipe_slow ); |
|
7669 |
%} |
|
7670 |
||
31410 | 7671 |
instruct vmul16I_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
7672 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 16); |
|
7673 |
match(Set dst (MulVI src1 src2)); |
|
7674 |
format %{ "vpmulld $dst,$src1,$src2\t! mul packed16I" %} |
|
30624 | 7675 |
ins_encode %{ |
7676 |
int vector_len = 2; |
|
31410 | 7677 |
__ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
30624 | 7678 |
%} |
7679 |
ins_pipe( pipe_slow ); |
|
7680 |
%} |
|
7681 |
||
7682 |
instruct vmul16I_mem(vecZ dst, vecZ src, memory mem) %{ |
|
7683 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 16); |
|
7684 |
match(Set dst (MulVI src (LoadVector mem))); |
|
7685 |
format %{ "vpmulld $dst,$src,$mem\t! mul packed16I" %} |
|
7686 |
ins_encode %{ |
|
7687 |
int vector_len = 2; |
|
7688 |
__ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7689 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7690 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7691 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7692 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7693 |
// Floats vector mul |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7694 |
instruct vmul2F(vecD dst, vecD src) %{ |
51857 | 7695 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 2); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7696 |
match(Set dst (MulVF dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7697 |
format %{ "mulps $dst,$src\t! mul packed2F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7698 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7699 |
__ mulps($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7700 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7701 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7702 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7703 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7704 |
instruct vmul2F_reg(vecD dst, vecD src1, vecD src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7705 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7706 |
match(Set dst (MulVF src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7707 |
format %{ "vmulps $dst,$src1,$src2\t! mul packed2F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7708 |
ins_encode %{ |
30624 | 7709 |
int vector_len = 0; |
7710 |
__ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7711 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7712 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7713 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7714 |
|
31410 | 7715 |
instruct vmul2F_mem(vecD dst, vecD src, memory mem) %{ |
7716 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
|
7717 |
match(Set dst (MulVF src (LoadVector mem))); |
|
7718 |
format %{ "vmulps $dst,$src,$mem\t! mul packed2F" %} |
|
7719 |
ins_encode %{ |
|
7720 |
int vector_len = 0; |
|
7721 |
__ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7722 |
%} |
|
7723 |
ins_pipe( pipe_slow ); |
|
7724 |
%} |
|
7725 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7726 |
instruct vmul4F(vecX dst, vecX src) %{ |
51857 | 7727 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 4); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7728 |
match(Set dst (MulVF dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7729 |
format %{ "mulps $dst,$src\t! mul packed4F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7730 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7731 |
__ mulps($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7732 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7733 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7734 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7735 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7736 |
instruct vmul4F_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7737 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7738 |
match(Set dst (MulVF src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7739 |
format %{ "vmulps $dst,$src1,$src2\t! mul packed4F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7740 |
ins_encode %{ |
30624 | 7741 |
int vector_len = 0; |
7742 |
__ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7743 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7744 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7745 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7746 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7747 |
instruct vmul4F_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7748 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7749 |
match(Set dst (MulVF src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7750 |
format %{ "vmulps $dst,$src,$mem\t! mul packed4F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7751 |
ins_encode %{ |
30624 | 7752 |
int vector_len = 0; |
7753 |
__ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7754 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7755 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7756 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7757 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7758 |
instruct vmul8F_reg(vecY dst, vecY src1, vecY src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7759 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7760 |
match(Set dst (MulVF src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7761 |
format %{ "vmulps $dst,$src1,$src2\t! mul packed8F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7762 |
ins_encode %{ |
30624 | 7763 |
int vector_len = 1; |
7764 |
__ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7765 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7766 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7767 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7768 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7769 |
instruct vmul8F_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7770 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7771 |
match(Set dst (MulVF src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7772 |
format %{ "vmulps $dst,$src,$mem\t! mul packed8F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7773 |
ins_encode %{ |
30624 | 7774 |
int vector_len = 1; |
7775 |
__ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7776 |
%} |
|
7777 |
ins_pipe( pipe_slow ); |
|
7778 |
%} |
|
7779 |
||
7780 |
instruct vmul16F_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
7781 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 16); |
|
7782 |
match(Set dst (MulVF src1 src2)); |
|
7783 |
format %{ "vmulps $dst,$src1,$src2\t! mul packed16F" %} |
|
7784 |
ins_encode %{ |
|
7785 |
int vector_len = 2; |
|
7786 |
__ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7787 |
%} |
|
7788 |
ins_pipe( pipe_slow ); |
|
7789 |
%} |
|
7790 |
||
7791 |
instruct vmul16F_mem(vecZ dst, vecZ src, memory mem) %{ |
|
7792 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 16); |
|
7793 |
match(Set dst (MulVF src (LoadVector mem))); |
|
7794 |
format %{ "vmulps $dst,$src,$mem\t! mul packed16F" %} |
|
7795 |
ins_encode %{ |
|
7796 |
int vector_len = 2; |
|
7797 |
__ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7798 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7799 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7800 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7801 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7802 |
// Doubles vector mul |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7803 |
instruct vmul2D(vecX dst, vecX src) %{ |
51857 | 7804 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 2); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7805 |
match(Set dst (MulVD dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7806 |
format %{ "mulpd $dst,$src\t! mul packed2D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7807 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7808 |
__ mulpd($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7809 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7810 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7811 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7812 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7813 |
instruct vmul2D_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7814 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7815 |
match(Set dst (MulVD src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7816 |
format %{ "vmulpd $dst,$src1,$src2\t! mul packed2D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7817 |
ins_encode %{ |
30624 | 7818 |
int vector_len = 0; |
7819 |
__ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7820 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7821 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7822 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7823 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7824 |
instruct vmul2D_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7825 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7826 |
match(Set dst (MulVD src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7827 |
format %{ "vmulpd $dst,$src,$mem\t! mul packed2D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7828 |
ins_encode %{ |
30624 | 7829 |
int vector_len = 0; |
7830 |
__ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7831 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7832 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7833 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7834 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7835 |
instruct vmul4D_reg(vecY dst, vecY src1, vecY src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7836 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7837 |
match(Set dst (MulVD src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7838 |
format %{ "vmulpd $dst,$src1,$src2\t! mul packed4D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7839 |
ins_encode %{ |
30624 | 7840 |
int vector_len = 1; |
7841 |
__ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7842 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7843 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7844 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7845 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7846 |
instruct vmul4D_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7847 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7848 |
match(Set dst (MulVD src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7849 |
format %{ "vmulpd $dst,$src,$mem\t! mul packed4D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7850 |
ins_encode %{ |
30624 | 7851 |
int vector_len = 1; |
7852 |
__ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7853 |
%} |
|
7854 |
ins_pipe( pipe_slow ); |
|
7855 |
%} |
|
7856 |
||
7857 |
instruct vmul8D_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
7858 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 8); |
|
7859 |
match(Set dst (MulVD src1 src2)); |
|
7860 |
format %{ "vmulpd $dst k0,$src1,$src2\t! mul packed8D" %} |
|
7861 |
ins_encode %{ |
|
7862 |
int vector_len = 2; |
|
7863 |
__ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
7864 |
%} |
|
7865 |
ins_pipe( pipe_slow ); |
|
7866 |
%} |
|
7867 |
||
7868 |
instruct vmul8D_mem(vecZ dst, vecZ src, memory mem) %{ |
|
7869 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 8); |
|
7870 |
match(Set dst (MulVD src (LoadVector mem))); |
|
7871 |
format %{ "vmulpd $dst k0,$src,$mem\t! mul packed8D" %} |
|
7872 |
ins_encode %{ |
|
7873 |
int vector_len = 2; |
|
7874 |
__ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7875 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7876 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7877 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7878 |
|
51857 | 7879 |
instruct vcmov8F_reg(legVecY dst, legVecY src1, legVecY src2, immI8 cop, cmpOp_vcmppd copnd) %{ |
7880 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
|
48309 | 7881 |
match(Set dst (CMoveVF (Binary copnd cop) (Binary src1 src2))); |
7882 |
effect(TEMP dst, USE src1, USE src2); |
|
7883 |
format %{ "cmpps.$copnd $dst, $src1, $src2 ! vcmovevf, cond=$cop\n\t" |
|
7884 |
"blendvps $dst,$src1,$src2,$dst ! vcmovevf\n\t" |
|
7885 |
%} |
|
7886 |
ins_encode %{ |
|
7887 |
int vector_len = 1; |
|
7888 |
int cond = (Assembler::Condition)($copnd$$cmpcode); |
|
7889 |
__ cmpps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, cond, vector_len); |
|
7890 |
__ blendvps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, $dst$$XMMRegister, vector_len); |
|
7891 |
%} |
|
7892 |
ins_pipe( pipe_slow ); |
|
7893 |
%} |
|
7894 |
||
51857 | 7895 |
instruct vcmov4D_reg(legVecY dst, legVecY src1, legVecY src2, immI8 cop, cmpOp_vcmppd copnd) %{ |
7896 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
|
33469
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7897 |
match(Set dst (CMoveVD (Binary copnd cop) (Binary src1 src2))); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7898 |
effect(TEMP dst, USE src1, USE src2); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7899 |
format %{ "cmppd.$copnd $dst, $src1, $src2 ! vcmovevd, cond=$cop\n\t" |
42039 | 7900 |
"blendvpd $dst,$src1,$src2,$dst ! vcmovevd\n\t" |
33469
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7901 |
%} |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7902 |
ins_encode %{ |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7903 |
int vector_len = 1; |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7904 |
int cond = (Assembler::Condition)($copnd$$cmpcode); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7905 |
__ cmppd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, cond, vector_len); |
42039 | 7906 |
__ blendvpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, $dst$$XMMRegister, vector_len); |
33469
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7907 |
%} |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7908 |
ins_pipe( pipe_slow ); |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7909 |
%} |
30f4811eded0
8139340: SuperWord enhancement to support vector conditional move (CMovVD) on Intel AVX cpu
iveresov
parents:
33065
diff
changeset
|
7910 |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7911 |
// --------------------------------- DIV -------------------------------------- |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7912 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7913 |
// Floats vector div |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7914 |
instruct vdiv2F(vecD dst, vecD src) %{ |
51857 | 7915 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 2); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7916 |
match(Set dst (DivVF dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7917 |
format %{ "divps $dst,$src\t! div packed2F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7918 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7919 |
__ divps($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7920 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7921 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7922 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7923 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7924 |
instruct vdiv2F_reg(vecD dst, vecD src1, vecD src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7925 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7926 |
match(Set dst (DivVF src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7927 |
format %{ "vdivps $dst,$src1,$src2\t! div packed2F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7928 |
ins_encode %{ |
30624 | 7929 |
int vector_len = 0; |
7930 |
__ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7931 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7932 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7933 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7934 |
|
31410 | 7935 |
instruct vdiv2F_mem(vecD dst, vecD src, memory mem) %{ |
7936 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
|
7937 |
match(Set dst (DivVF src (LoadVector mem))); |
|
7938 |
format %{ "vdivps $dst,$src,$mem\t! div packed2F" %} |
|
7939 |
ins_encode %{ |
|
7940 |
int vector_len = 0; |
|
7941 |
__ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7942 |
%} |
|
7943 |
ins_pipe( pipe_slow ); |
|
7944 |
%} |
|
7945 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7946 |
instruct vdiv4F(vecX dst, vecX src) %{ |
51857 | 7947 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 4); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7948 |
match(Set dst (DivVF dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7949 |
format %{ "divps $dst,$src\t! div packed4F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7950 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7951 |
__ divps($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7952 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7953 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7954 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7955 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7956 |
instruct vdiv4F_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7957 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7958 |
match(Set dst (DivVF src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7959 |
format %{ "vdivps $dst,$src1,$src2\t! div packed4F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7960 |
ins_encode %{ |
30624 | 7961 |
int vector_len = 0; |
7962 |
__ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7963 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7964 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7965 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7966 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7967 |
instruct vdiv4F_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7968 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7969 |
match(Set dst (DivVF src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7970 |
format %{ "vdivps $dst,$src,$mem\t! div packed4F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7971 |
ins_encode %{ |
30624 | 7972 |
int vector_len = 0; |
7973 |
__ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7974 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7975 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7976 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7977 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7978 |
instruct vdiv8F_reg(vecY dst, vecY src1, vecY src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7979 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7980 |
match(Set dst (DivVF src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7981 |
format %{ "vdivps $dst,$src1,$src2\t! div packed8F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7982 |
ins_encode %{ |
30624 | 7983 |
int vector_len = 1; |
7984 |
__ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7985 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7986 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7987 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7988 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7989 |
instruct vdiv8F_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7990 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7991 |
match(Set dst (DivVF src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7992 |
format %{ "vdivps $dst,$src,$mem\t! div packed8F" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
7993 |
ins_encode %{ |
30624 | 7994 |
int vector_len = 1; |
7995 |
__ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
7996 |
%} |
|
7997 |
ins_pipe( pipe_slow ); |
|
7998 |
%} |
|
7999 |
||
8000 |
instruct vdiv16F_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
8001 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 16); |
|
8002 |
match(Set dst (DivVF src1 src2)); |
|
8003 |
format %{ "vdivps $dst,$src1,$src2\t! div packed16F" %} |
|
8004 |
ins_encode %{ |
|
8005 |
int vector_len = 2; |
|
8006 |
__ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
8007 |
%} |
|
8008 |
ins_pipe( pipe_slow ); |
|
8009 |
%} |
|
8010 |
||
8011 |
instruct vdiv16F_mem(vecZ dst, vecZ src, memory mem) %{ |
|
8012 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 16); |
|
8013 |
match(Set dst (DivVF src (LoadVector mem))); |
|
8014 |
format %{ "vdivps $dst,$src,$mem\t! div packed16F" %} |
|
8015 |
ins_encode %{ |
|
8016 |
int vector_len = 2; |
|
8017 |
__ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8018 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8019 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8020 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8021 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8022 |
// Doubles vector div |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8023 |
instruct vdiv2D(vecX dst, vecX src) %{ |
51857 | 8024 |
predicate(UseAVX == 0 && n->as_Vector()->length() == 2); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8025 |
match(Set dst (DivVD dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8026 |
format %{ "divpd $dst,$src\t! div packed2D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8027 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8028 |
__ divpd($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8029 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8030 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8031 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8032 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8033 |
instruct vdiv2D_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8034 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8035 |
match(Set dst (DivVD src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8036 |
format %{ "vdivpd $dst,$src1,$src2\t! div packed2D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8037 |
ins_encode %{ |
30624 | 8038 |
int vector_len = 0; |
8039 |
__ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8040 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8041 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8042 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8043 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8044 |
instruct vdiv2D_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8045 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8046 |
match(Set dst (DivVD src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8047 |
format %{ "vdivpd $dst,$src,$mem\t! div packed2D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8048 |
ins_encode %{ |
30624 | 8049 |
int vector_len = 0; |
8050 |
__ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8051 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8052 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8053 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8054 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8055 |
instruct vdiv4D_reg(vecY dst, vecY src1, vecY src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8056 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8057 |
match(Set dst (DivVD src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8058 |
format %{ "vdivpd $dst,$src1,$src2\t! div packed4D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8059 |
ins_encode %{ |
30624 | 8060 |
int vector_len = 1; |
8061 |
__ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8062 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8063 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8064 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8065 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8066 |
instruct vdiv4D_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8067 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8068 |
match(Set dst (DivVD src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8069 |
format %{ "vdivpd $dst,$src,$mem\t! div packed4D" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8070 |
ins_encode %{ |
30624 | 8071 |
int vector_len = 1; |
8072 |
__ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
8073 |
%} |
|
8074 |
ins_pipe( pipe_slow ); |
|
8075 |
%} |
|
8076 |
||
8077 |
instruct vdiv8D_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
8078 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 8); |
|
8079 |
match(Set dst (DivVD src1 src2)); |
|
8080 |
format %{ "vdivpd $dst,$src1,$src2\t! div packed8D" %} |
|
8081 |
ins_encode %{ |
|
8082 |
int vector_len = 2; |
|
8083 |
__ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
8084 |
%} |
|
8085 |
ins_pipe( pipe_slow ); |
|
8086 |
%} |
|
8087 |
||
8088 |
instruct vdiv8D_mem(vecZ dst, vecZ src, memory mem) %{ |
|
8089 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 8); |
|
8090 |
match(Set dst (DivVD src (LoadVector mem))); |
|
8091 |
format %{ "vdivpd $dst,$src,$mem\t! div packed8D" %} |
|
8092 |
ins_encode %{ |
|
8093 |
int vector_len = 2; |
|
8094 |
__ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8095 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8096 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8097 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8098 |
|
32723
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8099 |
// --------------------------------- Sqrt -------------------------------------- |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8100 |
|
48089
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8101 |
// Floating point vector sqrt |
32723
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8102 |
instruct vsqrt2D_reg(vecX dst, vecX src) %{ |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8103 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8104 |
match(Set dst (SqrtVD src)); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8105 |
format %{ "vsqrtpd $dst,$src\t! sqrt packed2D" %} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8106 |
ins_encode %{ |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8107 |
int vector_len = 0; |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8108 |
__ vsqrtpd($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8109 |
%} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8110 |
ins_pipe( pipe_slow ); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8111 |
%} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8112 |
|
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8113 |
instruct vsqrt2D_mem(vecX dst, memory mem) %{ |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8114 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 2); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8115 |
match(Set dst (SqrtVD (LoadVector mem))); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8116 |
format %{ "vsqrtpd $dst,$mem\t! sqrt packed2D" %} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8117 |
ins_encode %{ |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8118 |
int vector_len = 0; |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8119 |
__ vsqrtpd($dst$$XMMRegister, $mem$$Address, vector_len); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8120 |
%} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8121 |
ins_pipe( pipe_slow ); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8122 |
%} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8123 |
|
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8124 |
instruct vsqrt4D_reg(vecY dst, vecY src) %{ |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8125 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8126 |
match(Set dst (SqrtVD src)); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8127 |
format %{ "vsqrtpd $dst,$src\t! sqrt packed4D" %} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8128 |
ins_encode %{ |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8129 |
int vector_len = 1; |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8130 |
__ vsqrtpd($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8131 |
%} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8132 |
ins_pipe( pipe_slow ); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8133 |
%} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8134 |
|
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8135 |
instruct vsqrt4D_mem(vecY dst, memory mem) %{ |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8136 |
predicate(UseAVX > 0 && n->as_Vector()->length() == 4); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8137 |
match(Set dst (SqrtVD (LoadVector mem))); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8138 |
format %{ "vsqrtpd $dst,$mem\t! sqrt packed4D" %} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8139 |
ins_encode %{ |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8140 |
int vector_len = 1; |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8141 |
__ vsqrtpd($dst$$XMMRegister, $mem$$Address, vector_len); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8142 |
%} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8143 |
ins_pipe( pipe_slow ); |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8144 |
%} |
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8145 |
|
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8146 |
// Square root of 8 packed doubles, register source (AVX-512, 512-bit op).
instruct vsqrt8D_reg(vecZ dst, vecZ src) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (SqrtVD src));
  format %{ "vsqrtpd $dst,$src\t! sqrt packed8D" %}
  ins_encode %{
    int vlen = 2; // encoder vector-length selector for the 512-bit form
    __ vsqrtpd($dst$$XMMRegister, $src$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8156 |
|
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8157 |
// Square root of 8 packed doubles, memory source (AVX-512, 512-bit op).
instruct vsqrt8D_mem(vecZ dst, memory mem) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (SqrtVD (LoadVector mem)));
  format %{ "vsqrtpd $dst,$mem\t! sqrt packed8D" %}
  ins_encode %{
    int vlen = 2; // encoder vector-length selector for the 512-bit form
    __ vsqrtpd($dst$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
56534fb3d71a
8135028: support for vectorizing double precision sqrt
mcberg
parents:
32082
diff
changeset
|
8167 |
|
48089
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8168 |
// Square root of 2 packed floats, register source (any AVX, 128-bit op).
instruct vsqrt2F_reg(vecD dst, vecD src) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (SqrtVF src));
  format %{ "vsqrtps $dst,$src\t! sqrt packed2F" %}
  ins_encode %{
    int vlen = 0; // encoder vector-length selector for the 128-bit form
    __ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8178 |
|
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8179 |
// Square root of 2 packed floats, memory source (any AVX, 128-bit op).
instruct vsqrt2F_mem(vecD dst, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (SqrtVF (LoadVector mem)));
  format %{ "vsqrtps $dst,$mem\t! sqrt packed2F" %}
  ins_encode %{
    int vlen = 0; // encoder vector-length selector for the 128-bit form
    __ vsqrtps($dst$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8189 |
|
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8190 |
// Square root of 4 packed floats, register source (any AVX, 128-bit op).
instruct vsqrt4F_reg(vecX dst, vecX src) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SqrtVF src));
  format %{ "vsqrtps $dst,$src\t! sqrt packed4F" %}
  ins_encode %{
    int vlen = 0; // encoder vector-length selector for the 128-bit form
    __ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8200 |
|
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8201 |
// Square root of 4 packed floats, memory source (any AVX, 128-bit op).
instruct vsqrt4F_mem(vecX dst, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (SqrtVF (LoadVector mem)));
  format %{ "vsqrtps $dst,$mem\t! sqrt packed4F" %}
  ins_encode %{
    int vlen = 0; // encoder vector-length selector for the 128-bit form
    __ vsqrtps($dst$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8211 |
|
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8212 |
// Square root of 8 packed floats, register source (any AVX, 256-bit op).
instruct vsqrt8F_reg(vecY dst, vecY src) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (SqrtVF src));
  format %{ "vsqrtps $dst,$src\t! sqrt packed8F" %}
  ins_encode %{
    int vlen = 1; // encoder vector-length selector for the 256-bit form
    __ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8222 |
|
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8223 |
// Square root of 8 packed floats, memory source (any AVX, 256-bit op).
instruct vsqrt8F_mem(vecY dst, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (SqrtVF (LoadVector mem)));
  format %{ "vsqrtps $dst,$mem\t! sqrt packed8F" %}
  ins_encode %{
    int vlen = 1; // encoder vector-length selector for the 256-bit form
    __ vsqrtps($dst$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8233 |
|
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8234 |
// Square root of 16 packed floats, register source (AVX-512, 512-bit op).
instruct vsqrt16F_reg(vecZ dst, vecZ src) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (SqrtVF src));
  format %{ "vsqrtps $dst,$src\t! sqrt packed16F" %}
  ins_encode %{
    int vlen = 2; // encoder vector-length selector for the 512-bit form
    __ vsqrtps($dst$$XMMRegister, $src$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8244 |
|
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8245 |
// Square root of 16 packed floats, memory source (AVX-512, 512-bit op).
instruct vsqrt16F_mem(vecZ dst, memory mem) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (SqrtVF (LoadVector mem)));
  format %{ "vsqrtps $dst,$mem\t! sqrt packed16F" %}
  ins_encode %{
    int vlen = 2; // encoder vector-length selector for the 512-bit form
    __ vsqrtps($dst$$XMMRegister, $mem$$Address, vlen);
  %}
  ins_pipe( pipe_slow );
%}
22c9856fc2c2
8190800: Support vectorization of Math.sqrt() on floats
rlupusoru
parents:
47216
diff
changeset
|
8255 |
|
54750 | 8256 |
// ------------------------------ Shift ---------------------------------------

// Left and right shift count vectors are the same on x86
// (only lowest bits of xmm reg are used for count).
|
8260 |
// Materialize a variable shift count into an XMM register; the same
// register form serves both left- and right-shift count nodes.
instruct vshiftcnt(vecS dst, rRegI cnt) %{
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "movdl $dst,$cnt\t! load shift count" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $cnt$$Register);
  %}
  ins_pipe( pipe_slow );
%}
|
8269 |
||
8270 |
// Materialize an immediate shift count into an XMM register,
// staging the constant through a scratch GPR.
instruct vshiftcntimm(vecS dst, immI8 cnt, rRegI tmp) %{
  match(Set dst cnt);
  effect(TEMP tmp);
  format %{ "movl $tmp,$cnt\t"
            "movdl $dst,$tmp\t! load shift count" %}
  ins_encode %{
    __ movl($tmp$$Register, $cnt$$constant);
    __ movdl($dst$$XMMRegister, $tmp$$Register);
  %}
  ins_pipe( pipe_slow );
%}
|
8281 |
||
8282 |
// Byte vector shift |
|
8283 |
// Byte vector shift, 4 elements: widen bytes to words, shift as words,
// mask back to byte range and re-pack (no native byte shift on x86).
instruct vshift4B(vecS dst, vecS src, vecS shift, vecS tmp, rRegI scratch) %{
  predicate(UseSSE > 3 && n->as_Vector()->length() == 4);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  match(Set dst (URShiftVB src shift));
  effect(TEMP dst, USE src, USE shift, TEMP tmp, TEMP scratch);
  format %{"vextendbw $tmp,$src\n\t"
           "vshiftw $tmp,$shift\n\t"
           "movdqu $dst,[0x00ff00ff0x00ff00ff]\n\t"
           "pand $dst,$tmp\n\t"
           "packuswb $dst,$dst\n\t ! packed4B shift" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // selects left/right/unsigned variant

    __ vextendbw(opcode, $tmp$$XMMRegister, $src$$XMMRegister);
    __ vshiftw(opcode, $tmp$$XMMRegister, $shift$$XMMRegister);
    __ movdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register);
    __ pand($dst$$XMMRegister, $tmp$$XMMRegister);
    __ packuswb($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
8305 |
||
8306 |
// Byte vector shift, 8 elements: same widen/shift/mask/pack scheme as
// the 4-byte variant, operating on a 64-bit vector.
instruct vshift8B(vecD dst, vecD src, vecS shift, vecD tmp, rRegI scratch) %{
  predicate(UseSSE > 3 && n->as_Vector()->length() == 8);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  match(Set dst (URShiftVB src shift));
  effect(TEMP dst, USE src, USE shift, TEMP tmp, TEMP scratch);
  format %{"vextendbw $tmp,$src\n\t"
           "vshiftw $tmp,$shift\n\t"
           "movdqu $dst,[0x00ff00ff0x00ff00ff]\n\t"
           "pand $dst,$tmp\n\t"
           "packuswb $dst,$dst\n\t ! packed8B shift" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // selects left/right/unsigned variant

    __ vextendbw(opcode, $tmp$$XMMRegister, $src$$XMMRegister);
    __ vshiftw(opcode, $tmp$$XMMRegister, $shift$$XMMRegister);
    __ movdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register);
    __ pand($dst$$XMMRegister, $tmp$$XMMRegister);
    __ packuswb($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
8328 |
||
8329 |
// Byte vector shift, 16 elements, pre-AVX2 path: the two 8-byte halves
// are widened and shifted separately, then masked and packed back together.
instruct vshift16B(vecX dst, vecX src, vecS shift, vecX tmp1, vecX tmp2, rRegI scratch) %{
  predicate(UseSSE > 3 && UseAVX <= 1 && n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  match(Set dst (URShiftVB src shift));
  effect(TEMP dst, USE src, USE shift, TEMP tmp1, TEMP tmp2, TEMP scratch);
  format %{"vextendbw $tmp1,$src\n\t"
           "vshiftw $tmp1,$shift\n\t"
           "pshufd $tmp2,$src\n\t"
           "vextendbw $tmp2,$tmp2\n\t"
           "vshiftw $tmp2,$shift\n\t"
           "movdqu $dst,[0x00ff00ff0x00ff00ff]\n\t"
           "pand $tmp2,$dst\n\t"
           "pand $dst,$tmp1\n\t"
           "packuswb $dst,$tmp2\n\t! packed16B shift" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // selects left/right/unsigned variant

    // Low half: widen to words and shift.
    __ vextendbw(opcode, $tmp1$$XMMRegister, $src$$XMMRegister);
    __ vshiftw(opcode, $tmp1$$XMMRegister, $shift$$XMMRegister);
    // High half: move down with pshufd(0xE), then widen and shift.
    __ pshufd($tmp2$$XMMRegister, $src$$XMMRegister, 0xE);
    __ vextendbw(opcode, $tmp2$$XMMRegister, $tmp2$$XMMRegister);
    __ vshiftw(opcode, $tmp2$$XMMRegister, $shift$$XMMRegister);
    // Mask each word back to a byte and pack the halves.
    __ movdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register);
    __ pand($tmp2$$XMMRegister, $dst$$XMMRegister);
    __ pand($dst$$XMMRegister, $tmp1$$XMMRegister);
    __ packuswb($dst$$XMMRegister, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
8359 |
||
8360 |
// Byte vector shift, 16 elements, AVX2 path: widen all 16 bytes into one
// 256-bit word vector, shift, mask, then pack the two 128-bit lanes.
instruct vshift16B_avx(vecX dst, vecX src, vecS shift, vecX tmp, rRegI scratch) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  match(Set dst (URShiftVB src shift));
  effect(TEMP dst, USE src, USE shift, TEMP tmp, TEMP scratch);
  format %{"vextendbw $tmp,$src\n\t"
           "vshiftw $tmp,$tmp,$shift\n\t"
           "vpand $tmp,$tmp,[0x00ff00ff0x00ff00ff]\n\t"
           "vextracti128_high $dst,$tmp\n\t"
           "vpackuswb $dst,$tmp,$dst\n\t! packed16B shift" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // selects left/right/unsigned variant

    int vlen = 1; // 256-bit operations
    __ vextendbw(opcode, $tmp$$XMMRegister, $src$$XMMRegister, vlen);
    __ vshiftw(opcode, $tmp$$XMMRegister, $tmp$$XMMRegister, $shift$$XMMRegister, vlen);
    __ vpand($tmp$$XMMRegister, $tmp$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), vlen, $scratch$$Register);
    __ vextracti128_high($dst$$XMMRegister, $tmp$$XMMRegister);
    // Final pack is a 128-bit operation (vector_len 0).
    __ vpackuswb($dst$$XMMRegister, $tmp$$XMMRegister, $dst$$XMMRegister, 0);
  %}
  ins_pipe( pipe_slow );
%}
|
8383 |
||
8384 |
// Byte vector shift, 32 elements (AVX2): widen each 128-bit half to a
// 256-bit word vector, shift, mask, pack, then vpermq fixes lane order.
instruct vshift32B_avx(vecY dst, vecY src, vecS shift, vecY tmp, rRegI scratch) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  match(Set dst (URShiftVB src shift));
  effect(TEMP dst, USE src, USE shift, TEMP tmp, TEMP scratch);
  format %{"vextracti128_high $tmp,$src\n\t"
           "vextendbw $tmp,$tmp\n\t"
           "vextendbw $dst,$src\n\t"
           "vshiftw $tmp,$tmp,$shift\n\t"
           "vshiftw $dst,$dst,$shift\n\t"
           "vpand $tmp,$tmp,[0x00ff00ff0x00ff00ff]\n\t"
           "vpand $dst,$dst,[0x00ff00ff0x00ff00ff]\n\t"
           "vpackuswb $dst,$dst,$tmp\n\t"
           "vpermq $dst,$dst,0xD8\n\t! packed32B shift" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // selects left/right/unsigned variant

    int vlen = 1; // 256-bit operations
    __ vextracti128_high($tmp$$XMMRegister, $src$$XMMRegister);
    __ vextendbw(opcode, $tmp$$XMMRegister, $tmp$$XMMRegister, vlen);
    __ vextendbw(opcode, $dst$$XMMRegister, $src$$XMMRegister, vlen);
    __ vshiftw(opcode, $tmp$$XMMRegister, $tmp$$XMMRegister, $shift$$XMMRegister, vlen);
    __ vshiftw(opcode, $dst$$XMMRegister, $dst$$XMMRegister, $shift$$XMMRegister, vlen);
    __ vpand($tmp$$XMMRegister, $tmp$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), vlen, $scratch$$Register);
    __ vpand($dst$$XMMRegister, $dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), vlen, $scratch$$Register);
    __ vpackuswb($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister, vlen);
    // vpackuswb interleaves 128-bit lanes; restore element order.
    __ vpermq($dst$$XMMRegister, $dst$$XMMRegister, 0xD8, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
8415 |
||
8416 |
// Byte vector shift, 64 elements (AVX-512): widen each 256-bit half to a
// 512-bit word vector, shift, mask, pack, then a table-driven vpermq
// restores element order.
instruct vshift64B_avx(vecZ dst, vecZ src, vecS shift, vecZ tmp1, vecZ tmp2, rRegI scratch) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 64);
  match(Set dst (LShiftVB src shift));
  match(Set dst (RShiftVB src shift));
  match(Set dst (URShiftVB src shift));
  effect(TEMP dst, USE src, USE shift, TEMP tmp1, TEMP tmp2, TEMP scratch);
  format %{"vextracti64x4 $tmp1,$src\n\t"
           "vextendbw $tmp1,$tmp1\n\t"
           "vextendbw $tmp2,$src\n\t"
           "vshiftw $tmp1,$tmp1,$shift\n\t"
           "vshiftw $tmp2,$tmp2,$shift\n\t"
           "vmovdqu $dst,[0x00ff00ff0x00ff00ff]\n\t"
           "vpbroadcastd $dst,$dst\n\t"
           "vpand $tmp1,$tmp1,$dst\n\t"
           "vpand $tmp2,$tmp2,$dst\n\t"
           "vpackuswb $dst,$tmp1,$tmp2\n\t"
           "evmovdquq $tmp2, [0x0604020007050301]\n\t"
           "vpermq $dst,$tmp2,$dst\n\t! packed64B shift" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // selects left/right/unsigned variant

    int vlen = 2; // 512-bit operations
    __ vextracti64x4($tmp1$$XMMRegister, $src$$XMMRegister, 1);
    __ vextendbw(opcode, $tmp1$$XMMRegister, $tmp1$$XMMRegister, vlen);
    __ vextendbw(opcode, $tmp2$$XMMRegister, $src$$XMMRegister, vlen);
    __ vshiftw(opcode, $tmp1$$XMMRegister, $tmp1$$XMMRegister, $shift$$XMMRegister, vlen);
    __ vshiftw(opcode, $tmp2$$XMMRegister, $tmp2$$XMMRegister, $shift$$XMMRegister, vlen);
    // Broadcast the 0x00ff word mask across the full register.
    __ vmovdqu($dst$$XMMRegister, ExternalAddress(vector_short_to_byte_mask()), $scratch$$Register);
    __ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vlen);
    __ vpand($tmp1$$XMMRegister, $tmp1$$XMMRegister, $dst$$XMMRegister, vlen);
    __ vpand($tmp2$$XMMRegister, $tmp2$$XMMRegister, $dst$$XMMRegister, vlen);
    __ vpackuswb($dst$$XMMRegister, $tmp1$$XMMRegister, $tmp2$$XMMRegister, vlen);
    // Permutation table puts the packed lanes back into element order.
    __ evmovdquq($tmp2$$XMMRegister, ExternalAddress(vector_byte_perm_mask()), vlen, $scratch$$Register);
    __ vpermq($dst$$XMMRegister, $tmp2$$XMMRegister, $dst$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8453 |
|
14131
e376e3d428c9
8001183: incorrect results of char vectors right shift operaiton
kvn
parents:
13930
diff
changeset
|
8454 |
// Shorts vector logical right shift produces incorrect Java result
// for negative data because Java code converts a short value into an int with
// sign extension before a shift. But char vectors are fine since chars are
// unsigned values.
// Shorts/Chars vector left shift
8459 |
// Shift of 2 packed shorts/chars.
// Fix: instruct name had a typo ("vshist2S"); renamed to vshift2S to match
// the vshift4S/vshift8S/... family. The name is only consumed by ADLC.
instruct vshift2S(vecS dst, vecS src, vecS shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  match(Set dst (URShiftVS src shift));
  effect(TEMP dst, USE src, USE shift);
  format %{ "vshiftw $dst,$src,$shift\t! shift packed2S" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // selects left/right/unsigned variant
    if (UseAVX == 0) {
      // Two-operand SSE form: copy src into dst first if they differ.
      if ($dst$$XMMRegister != $src$$XMMRegister)
        __ movflt($dst$$XMMRegister, $src$$XMMRegister);
      __ vshiftw(opcode, $dst$$XMMRegister, $shift$$XMMRegister);
    } else {
      int vector_len = 0; // 128-bit AVX form
      __ vshiftw(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
    }
  %}
  ins_pipe( pipe_slow );
%}
|
8479 |
||
8480 |
// Shift of 4 packed shorts/chars.
instruct vshift4S(vecD dst, vecD src, vecS shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  match(Set dst (URShiftVS src shift));
  effect(TEMP dst, USE src, USE shift);
  format %{ "vshiftw $dst,$src,$shift\t! shift packed4S" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // selects left/right/unsigned variant
    if (UseAVX == 0) {
      // Two-operand SSE form: copy src into dst first if they differ.
      if ($dst$$XMMRegister != $src$$XMMRegister)
        __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
      __ vshiftw(opcode, $dst$$XMMRegister, $shift$$XMMRegister);

    } else {
      int vlen = 0; // 128-bit AVX form
      __ vshiftw(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vlen);
    }
  %}
  ins_pipe( pipe_slow );
%}
|
8501 |
||
8502 |
// Shift of 8 packed shorts/chars.
instruct vshift8S(vecX dst, vecX src, vecS shift) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  match(Set dst (URShiftVS src shift));
  effect(TEMP dst, USE src, USE shift);
  format %{ "vshiftw $dst,$src,$shift\t! shift packed8S" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // selects left/right/unsigned variant
    if (UseAVX == 0) {
      // Two-operand SSE form: copy src into dst first if they differ.
      if ($dst$$XMMRegister != $src$$XMMRegister)
        __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
      __ vshiftw(opcode, $dst$$XMMRegister, $shift$$XMMRegister);
    } else {
      int vlen = 0; // 128-bit AVX form
      __ vshiftw(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vlen);
    }
  %}
  ins_pipe( pipe_slow );
%}
|
8522 |
||
8523 |
// Shift of 16 packed shorts/chars (AVX2, 256-bit three-operand form).
instruct vshift16S(vecY dst, vecY src, vecS shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  match(Set dst (URShiftVS src shift));
  effect(DEF dst, USE src, USE shift);
  format %{ "vshiftw $dst,$src,$shift\t! shift packed16S" %}
  ins_encode %{
    int vlen = 1; // 256-bit operation
    int opcode = this->as_Mach()->ideal_Opcode(); // selects left/right/unsigned variant
    __ vshiftw(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vlen);
  %}
  ins_pipe( pipe_slow );
%}
// Shift packed 32 shorts (512-bit vector); requires AVX-512BW for
// word-granularity operations on ZMM registers.
instruct vshift32S(vecZ dst, vecZ src, vecS shift) %{
  predicate(UseAVX > 2 && VM_Version::supports_avx512bw() && n->as_Vector()->length() == 32);
  match(Set dst (LShiftVS src shift));
  match(Set dst (RShiftVS src shift));
  match(Set dst (URShiftVS src shift));
  effect(DEF dst, USE src, USE shift);
  format %{ "vshiftw $dst,$src,$shift\t! shift packed32S" %}
  ins_encode %{
    int vector_len = 2; // 512-bit
    int opcode = this->as_Mach()->ideal_Opcode();
    __ vshiftw(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Integers vector left shift
// (Despite the section title, these rules also match both right-shift kinds;
// the ideal opcode selects the operation at encode time.)

// Shift packed 2 ints (64-bit vector).
instruct vshift2I(vecD dst, vecD src, vecS shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  match(Set dst (URShiftVI src shift));
  effect(TEMP dst, USE src, USE shift);
  format %{ "vshiftd $dst,$src,$shift\t! shift packed2I" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode();
    if (UseAVX == 0) {
      // 8-byte copy suffices for a vecD operand before the in-place shift.
      if ($dst$$XMMRegister != $src$$XMMRegister)
        __ movdbl($dst$$XMMRegister, $src$$XMMRegister);
      __ vshiftd(opcode, $dst$$XMMRegister, $shift$$XMMRegister);
    } else {
      int vector_len = 0; // 128-bit
      __ vshiftd(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Shift packed 4 ints (128-bit vector).
instruct vshift4I(vecX dst, vecX src, vecS shift) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  match(Set dst (URShiftVI src shift));
  effect(TEMP dst, USE src, USE shift);
  format %{ "vshiftd $dst,$src,$shift\t! shift packed4I" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode();
    if (UseAVX == 0) {
      // Pre-AVX two-operand form shifts dst in place, so copy src first.
      if ($dst$$XMMRegister != $src$$XMMRegister)
        __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
      __ vshiftd(opcode, $dst$$XMMRegister, $shift$$XMMRegister);
    } else {
      int vector_len = 0; // 128-bit
      __ vshiftd(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Shift packed 8 ints (256-bit vector); AVX2 three-operand form only.
instruct vshift8I(vecY dst, vecY src, vecS shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  match(Set dst (URShiftVI src shift));
  effect(DEF dst, USE src, USE shift);
  format %{ "vshiftd $dst,$src,$shift\t! shift packed8I" %}
  ins_encode %{
    int vector_len = 1; // 256-bit
    int opcode = this->as_Mach()->ideal_Opcode();
    __ vshiftd(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Shift packed 16 ints (512-bit vector); AVX-512.
instruct vshift16I(vecZ dst, vecZ src, vecS shift) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (LShiftVI src shift));
  match(Set dst (RShiftVI src shift));
  match(Set dst (URShiftVI src shift));
  effect(DEF dst, USE src, USE shift);
  format %{ "vshiftd $dst,$src,$shift\t! shift packed16I" %}
  ins_encode %{
    int vector_len = 2; // 512-bit
    int opcode = this->as_Mach()->ideal_Opcode();
    __ vshiftd(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Longs vector shift

// Shift packed 2 longs (128-bit vector). Only left and logical-right shifts
// match here; arithmetic right shift of longs is handled separately by the
// vsra2L_reg* rules below (no pre-AVX-512 hardware psraq).
instruct vshift2L(vecX dst, vecX src, vecS shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (LShiftVL src shift));
  match(Set dst (URShiftVL src shift));
  effect(TEMP dst, USE src, USE shift);
  format %{ "vshiftq $dst,$src,$shift\t! shift packed2L" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode();
    if (UseAVX == 0) {
      // Pre-AVX two-operand form shifts dst in place, so copy src first.
      if ($dst$$XMMRegister != $src$$XMMRegister)
        __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
      __ vshiftq(opcode, $dst$$XMMRegister, $shift$$XMMRegister);
    } else {
      int vector_len = 0; // 128-bit
      __ vshiftq(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Shift packed 4 longs (256-bit vector). Only left and logical-right shifts
// match here; arithmetic right shift is handled by the vsra4L_reg* rules.
instruct vshift4L(vecY dst, vecY src, vecS shift) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (LShiftVL src shift));
  match(Set dst (URShiftVL src shift));
  effect(DEF dst, USE src, USE shift);
  // Was "! left shift packed4L": misleading, since this rule also matches
  // URShiftVL. Now consistent with the packed2L/packed8L siblings.
  format %{ "vshiftq $dst,$src,$shift\t! shift packed4L" %}
  ins_encode %{
    int vector_len = 1; // 256-bit
    int opcode = this->as_Mach()->ideal_Opcode();
    __ vshiftq(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Shift packed 8 longs (512-bit vector). Unlike the 2L/4L rules, this one
// also matches RShiftVL: the predicate guarantees AVX-512, where evpsraq
// provides a hardware arithmetic right shift of quadwords.
instruct vshift8L(vecZ dst, vecZ src, vecS shift) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (LShiftVL src shift));
  match(Set dst (RShiftVL src shift));
  match(Set dst (URShiftVL src shift));
  effect(DEF dst, USE src, USE shift);
  format %{ "vshiftq $dst,$src,$shift\t! shift packed8L" %}
  ins_encode %{
    int vector_len = 2; // 512-bit
    int opcode = this->as_Mach()->ideal_Opcode();
    __ vshiftq(opcode, $dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// -------------------ArithmeticRightShift -----------------------------------
// Long vector arithmetic right shift

// Arithmetic right shift of packed 2 longs, emulated with logical shifts
// (SSE2 has no psraq). Uses the identity a >>s n == ((a >>u n) ^ m) - m,
// where m = 0x8000000000000000 >>u n re-extends the sign bits.
instruct vsra2L_reg(vecX dst, vecX src, vecS shift, vecX tmp, rRegI scratch) %{
  predicate(UseSSE >= 2 && n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  effect(TEMP dst, TEMP tmp, TEMP scratch);
  format %{ "movdqu $dst,$src\n\t"
            "psrlq $dst,$shift\n\t"
            "movdqu $tmp,[0x8000000000000000]\n\t"
            "psrlq $tmp,$shift\n\t"
            "pxor $dst,$tmp\n\t"
            "psubq $dst,$tmp\t! arithmetic right shift packed2L" %}
  ins_encode %{
    __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
    __ psrlq($dst$$XMMRegister, $shift$$XMMRegister);
    // Load the per-lane sign-bit mask; scratch holds the constant's address.
    __ movdqu($tmp$$XMMRegister, ExternalAddress(vector_long_sign_mask()), $scratch$$Register);
    __ psrlq($tmp$$XMMRegister, $shift$$XMMRegister);
    __ pxor($dst$$XMMRegister, $tmp$$XMMRegister);
    __ psubq($dst$$XMMRegister, $tmp$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Arithmetic right shift of packed 2 longs using the AVX-512 evpsraq
// instruction; preferred over the emulated vsra2L_reg when available.
instruct vsra2L_reg_evex(vecX dst, vecX src, vecS shift) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 2);
  match(Set dst (RShiftVL src shift));
  format %{ "evpsraq $dst,$src,$shift\t! arithmetic right shift packed2L" %}
  ins_encode %{
    int vector_len = 0; // 128-bit
    __ evpsraq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Arithmetic right shift of packed 4 longs, emulated with logical shifts
// on AVX2 (no vpsraq before AVX-512): (a >>u n) ^ m - m with
// m = 0x8000000000000000 >>u n restoring the sign bits.
instruct vsra4L_reg(vecY dst, vecY src, vecS shift, vecY tmp, rRegI scratch) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
  match(Set dst (RShiftVL src shift));
  effect(TEMP dst, TEMP tmp, TEMP scratch);
  format %{ "vpsrlq $dst,$src,$shift\n\t"
            "vmovdqu $tmp,[0x8000000000000000]\n\t"
            "vpsrlq $tmp,$tmp,$shift\n\t"
            "vpxor $dst,$dst,$tmp\n\t"
            "vpsubq $dst,$dst,$tmp\t! arithmetic right shift packed4L" %}
  ins_encode %{
    int vector_len = 1; // 256-bit
    __ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
    // Load the per-lane sign-bit mask; scratch holds the constant's address.
    __ vmovdqu($tmp$$XMMRegister, ExternalAddress(vector_long_sign_mask()), $scratch$$Register);
    __ vpsrlq($tmp$$XMMRegister, $tmp$$XMMRegister, $shift$$XMMRegister, vector_len);
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister, vector_len);
    __ vpsubq($dst$$XMMRegister, $dst$$XMMRegister, $tmp$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Arithmetic right shift of packed 4 longs using AVX-512 evpsraq;
// preferred over the emulated vsra4L_reg when available.
instruct vsra4L_reg_evex(vecY dst, vecY src, vecS shift) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 4);
  match(Set dst (RShiftVL src shift));
  format %{ "evpsraq $dst,$src,$shift\t! arithmetic right shift packed4L" %}
  ins_encode %{
    int vector_len = 1; // 256-bit
    __ evpsraq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// --------------------------------- AND --------------------------------------

// Bitwise AND of 4-byte vectors, pre-AVX two-operand form (dst &= src).
instruct vand4B(vecS dst, vecS src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (AndV dst src));
  format %{ "pand $dst,$src\t! and vectors (4 bytes)" %}
  ins_encode %{
    __ pand($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of 4-byte vectors, AVX three-operand register form.
instruct vand4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (4 bytes)" %}
  ins_encode %{
    int vector_len = 0; // 128-bit
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of a 4-byte vector with a memory operand (folds the load).
instruct vand4B_mem(vecS dst, vecS src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (AndV src (LoadVector mem)));
  format %{ "vpand $dst,$src,$mem\t! and vectors (4 bytes)" %}
  ins_encode %{
    int vector_len = 0; // 128-bit
    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of 8-byte vectors, pre-AVX two-operand form (dst &= src).
instruct vand8B(vecD dst, vecD src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV dst src));
  format %{ "pand $dst,$src\t! and vectors (8 bytes)" %}
  ins_encode %{
    __ pand($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of 8-byte vectors, AVX three-operand register form.
instruct vand8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (8 bytes)" %}
  ins_encode %{
    int vector_len = 0; // 128-bit
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of an 8-byte vector with a memory operand (folds the load).
instruct vand8B_mem(vecD dst, vecD src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
  match(Set dst (AndV src (LoadVector mem)));
  format %{ "vpand $dst,$src,$mem\t! and vectors (8 bytes)" %}
  ins_encode %{
    int vector_len = 0; // 128-bit
    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of 16-byte vectors, pre-AVX two-operand form (dst &= src).
instruct vand16B(vecX dst, vecX src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV dst src));
  format %{ "pand $dst,$src\t! and vectors (16 bytes)" %}
  ins_encode %{
    __ pand($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of 16-byte vectors, AVX three-operand register form.
instruct vand16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (16 bytes)" %}
  ins_encode %{
    int vector_len = 0; // 128-bit
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of a 16-byte vector with a memory operand (folds the load).
instruct vand16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
  match(Set dst (AndV src (LoadVector mem)));
  format %{ "vpand $dst,$src,$mem\t! and vectors (16 bytes)" %}
  ins_encode %{
    int vector_len = 0; // 128-bit
    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of 32-byte vectors (AVX2, 256-bit), register form.
instruct vand32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (32 bytes)" %}
  ins_encode %{
    int vector_len = 1; // 256-bit
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of a 32-byte vector with a memory operand (folds the load).
instruct vand32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
  match(Set dst (AndV src (LoadVector mem)));
  format %{ "vpand $dst,$src,$mem\t! and vectors (32 bytes)" %}
  ins_encode %{
    int vector_len = 1; // 256-bit
    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of 64-byte vectors (AVX-512, 512-bit), register form.
instruct vand64B_reg(vecZ dst, vecZ src1, vecZ src2) %{
  predicate(UseAVX > 2 && n->as_Vector()->length_in_bytes() == 64);
  match(Set dst (AndV src1 src2));
  format %{ "vpand $dst,$src1,$src2\t! and vectors (64 bytes)" %}
  ins_encode %{
    int vector_len = 2; // 512-bit
    __ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise AND of a 64-byte vector with a memory operand (folds the load).
instruct vand64B_mem(vecZ dst, vecZ src, memory mem) %{
  predicate(UseAVX > 2 && n->as_Vector()->length_in_bytes() == 64);
  match(Set dst (AndV src (LoadVector mem)));
  format %{ "vpand $dst,$src,$mem\t! and vectors (64 bytes)" %}
  ins_encode %{
    int vector_len = 2; // 512-bit
    __ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// --------------------------------- OR ---------------------------------------

// Bitwise OR of 4-byte vectors, pre-AVX two-operand form (dst |= src).
instruct vor4B(vecS dst, vecS src) %{
  predicate(UseAVX == 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (OrV dst src));
  format %{ "por $dst,$src\t! or vectors (4 bytes)" %}
  ins_encode %{
    __ por($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise OR of 4-byte vectors, AVX three-operand register form.
instruct vor4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (OrV src1 src2));
  format %{ "vpor $dst,$src1,$src2\t! or vectors (4 bytes)" %}
  ins_encode %{
    int vector_len = 0; // 128-bit
    __ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
// Bitwise OR of a 4-byte vector with a memory operand (folds the load).
instruct vor4B_mem(vecS dst, vecS src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
  match(Set dst (OrV src (LoadVector mem)));
  format %{ "vpor $dst,$src,$mem\t! or vectors (4 bytes)" %}
  ins_encode %{
    int vector_len = 0; // 128-bit
    __ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
8916 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8917 |
instruct vor8B(vecD dst, vecD src) %{ |
51857 | 8918 |
predicate(UseAVX == 0 && n->as_Vector()->length_in_bytes() == 8); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8919 |
match(Set dst (OrV dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8920 |
format %{ "por $dst,$src\t! or vectors (8 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8921 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8922 |
__ por($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8923 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8924 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8925 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8926 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8927 |
instruct vor8B_reg(vecD dst, vecD src1, vecD src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8928 |
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8929 |
match(Set dst (OrV src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8930 |
format %{ "vpor $dst,$src1,$src2\t! or vectors (8 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8931 |
ins_encode %{ |
30624 | 8932 |
int vector_len = 0; |
8933 |
__ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8934 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8935 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8936 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8937 |
|
31410 | 8938 |
instruct vor8B_mem(vecD dst, vecD src, memory mem) %{ |
8939 |
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4); |
|
8940 |
match(Set dst (OrV src (LoadVector mem))); |
|
8941 |
format %{ "vpor $dst,$src,$mem\t! or vectors (8 bytes)" %} |
|
8942 |
ins_encode %{ |
|
8943 |
int vector_len = 0; |
|
8944 |
__ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
8945 |
%} |
|
8946 |
ins_pipe( pipe_slow ); |
|
8947 |
%} |
|
8948 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8949 |
instruct vor16B(vecX dst, vecX src) %{ |
51857 | 8950 |
predicate(UseAVX == 0 && n->as_Vector()->length_in_bytes() == 16); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8951 |
match(Set dst (OrV dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8952 |
format %{ "por $dst,$src\t! or vectors (16 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8953 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8954 |
__ por($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8955 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8956 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8957 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8958 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8959 |
instruct vor16B_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8960 |
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8961 |
match(Set dst (OrV src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8962 |
format %{ "vpor $dst,$src1,$src2\t! or vectors (16 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8963 |
ins_encode %{ |
30624 | 8964 |
int vector_len = 0; |
8965 |
__ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8966 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8967 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8968 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8969 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8970 |
instruct vor16B_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8971 |
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8972 |
match(Set dst (OrV src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8973 |
format %{ "vpor $dst,$src,$mem\t! or vectors (16 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8974 |
ins_encode %{ |
30624 | 8975 |
int vector_len = 0; |
8976 |
__ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8977 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8978 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8979 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8980 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8981 |
instruct vor32B_reg(vecY dst, vecY src1, vecY src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8982 |
predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8983 |
match(Set dst (OrV src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8984 |
format %{ "vpor $dst,$src1,$src2\t! or vectors (32 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8985 |
ins_encode %{ |
30624 | 8986 |
int vector_len = 1; |
8987 |
__ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8988 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8989 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8990 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8991 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8992 |
instruct vor32B_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8993 |
predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8994 |
match(Set dst (OrV src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8995 |
format %{ "vpor $dst,$src,$mem\t! or vectors (32 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
8996 |
ins_encode %{ |
30624 | 8997 |
int vector_len = 1; |
8998 |
__ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
8999 |
%} |
|
9000 |
ins_pipe( pipe_slow ); |
|
9001 |
%} |
|
9002 |
||
9003 |
instruct vor64B_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
9004 |
predicate(UseAVX > 2 && n->as_Vector()->length_in_bytes() == 64); |
|
9005 |
match(Set dst (OrV src1 src2)); |
|
9006 |
format %{ "vpor $dst,$src1,$src2\t! or vectors (64 bytes)" %} |
|
9007 |
ins_encode %{ |
|
9008 |
int vector_len = 2; |
|
9009 |
__ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
9010 |
%} |
|
9011 |
ins_pipe( pipe_slow ); |
|
9012 |
%} |
|
9013 |
||
9014 |
instruct vor64B_mem(vecZ dst, vecZ src, memory mem) %{ |
|
9015 |
predicate(UseAVX > 2 && n->as_Vector()->length_in_bytes() == 64); |
|
9016 |
match(Set dst (OrV src (LoadVector mem))); |
|
9017 |
format %{ "vpor $dst,$src,$mem\t! or vectors (64 bytes)" %} |
|
9018 |
ins_encode %{ |
|
9019 |
int vector_len = 2; |
|
9020 |
__ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9021 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9022 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9023 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9024 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9025 |
// --------------------------------- XOR -------------------------------------- |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9026 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9027 |
instruct vxor4B(vecS dst, vecS src) %{ |
51857 | 9028 |
predicate(UseAVX == 0 && n->as_Vector()->length_in_bytes() == 4); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9029 |
match(Set dst (XorV dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9030 |
format %{ "pxor $dst,$src\t! xor vectors (4 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9031 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9032 |
__ pxor($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9033 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9034 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9035 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9036 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9037 |
instruct vxor4B_reg(vecS dst, vecS src1, vecS src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9038 |
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9039 |
match(Set dst (XorV src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9040 |
format %{ "vpxor $dst,$src1,$src2\t! xor vectors (4 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9041 |
ins_encode %{ |
30624 | 9042 |
int vector_len = 0; |
9043 |
__ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9044 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9045 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9046 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9047 |
|
31410 | 9048 |
instruct vxor4B_mem(vecS dst, vecS src, memory mem) %{ |
9049 |
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4); |
|
9050 |
match(Set dst (XorV src (LoadVector mem))); |
|
9051 |
format %{ "vpxor $dst,$src,$mem\t! xor vectors (4 bytes)" %} |
|
9052 |
ins_encode %{ |
|
9053 |
int vector_len = 0; |
|
9054 |
__ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
9055 |
%} |
|
9056 |
ins_pipe( pipe_slow ); |
|
9057 |
%} |
|
9058 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9059 |
instruct vxor8B(vecD dst, vecD src) %{ |
51857 | 9060 |
predicate(UseAVX == 0 && n->as_Vector()->length_in_bytes() == 8); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9061 |
match(Set dst (XorV dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9062 |
format %{ "pxor $dst,$src\t! xor vectors (8 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9063 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9064 |
__ pxor($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9065 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9066 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9067 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9068 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9069 |
instruct vxor8B_reg(vecD dst, vecD src1, vecD src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9070 |
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9071 |
match(Set dst (XorV src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9072 |
format %{ "vpxor $dst,$src1,$src2\t! xor vectors (8 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9073 |
ins_encode %{ |
30624 | 9074 |
int vector_len = 0; |
9075 |
__ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9076 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9077 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9078 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9079 |
|
31410 | 9080 |
instruct vxor8B_mem(vecD dst, vecD src, memory mem) %{ |
9081 |
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8); |
|
9082 |
match(Set dst (XorV src (LoadVector mem))); |
|
9083 |
format %{ "vpxor $dst,$src,$mem\t! xor vectors (8 bytes)" %} |
|
9084 |
ins_encode %{ |
|
9085 |
int vector_len = 0; |
|
9086 |
__ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
9087 |
%} |
|
9088 |
ins_pipe( pipe_slow ); |
|
9089 |
%} |
|
9090 |
||
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9091 |
instruct vxor16B(vecX dst, vecX src) %{ |
51857 | 9092 |
predicate(UseAVX == 0 && n->as_Vector()->length_in_bytes() == 16); |
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9093 |
match(Set dst (XorV dst src)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9094 |
format %{ "pxor $dst,$src\t! xor vectors (16 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9095 |
ins_encode %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9096 |
__ pxor($dst$$XMMRegister, $src$$XMMRegister); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9097 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9098 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9099 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9100 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9101 |
instruct vxor16B_reg(vecX dst, vecX src1, vecX src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9102 |
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9103 |
match(Set dst (XorV src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9104 |
format %{ "vpxor $dst,$src1,$src2\t! xor vectors (16 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9105 |
ins_encode %{ |
30624 | 9106 |
int vector_len = 0; |
9107 |
__ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9108 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9109 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9110 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9111 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9112 |
instruct vxor16B_mem(vecX dst, vecX src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9113 |
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9114 |
match(Set dst (XorV src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9115 |
format %{ "vpxor $dst,$src,$mem\t! xor vectors (16 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9116 |
ins_encode %{ |
30624 | 9117 |
int vector_len = 0; |
9118 |
__ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9119 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9120 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9121 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9122 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9123 |
instruct vxor32B_reg(vecY dst, vecY src1, vecY src2) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9124 |
predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9125 |
match(Set dst (XorV src1 src2)); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9126 |
format %{ "vpxor $dst,$src1,$src2\t! xor vectors (32 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9127 |
ins_encode %{ |
30624 | 9128 |
int vector_len = 1; |
9129 |
__ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
13485
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9130 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9131 |
ins_pipe( pipe_slow ); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9132 |
%} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9133 |
|
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9134 |
instruct vxor32B_mem(vecY dst, vecY src, memory mem) %{ |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9135 |
predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9136 |
match(Set dst (XorV src (LoadVector mem))); |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9137 |
format %{ "vpxor $dst,$src,$mem\t! xor vectors (32 bytes)" %} |
6c7faa516fc6
6340864: Implement vectorization optimizations in hotspot-server
kvn
parents:
13294
diff
changeset
|
9138 |
ins_encode %{ |
30624 | 9139 |
int vector_len = 1; |
9140 |
__ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
9141 |
%} |
|
9142 |
ins_pipe( pipe_slow ); |
|
9143 |
%} |
|
9144 |
||
9145 |
instruct vxor64B_reg(vecZ dst, vecZ src1, vecZ src2) %{ |
|
9146 |
predicate(UseAVX > 2 && n->as_Vector()->length_in_bytes() == 64); |
|
9147 |
match(Set dst (XorV src1 src2)); |
|
9148 |
format %{ "vpxor $dst,$src1,$src2\t! xor vectors (64 bytes)" %} |
|
9149 |
ins_encode %{ |
|
9150 |
int vector_len = 2; |
|
9151 |
__ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len); |
|
9152 |
%} |
|
9153 |
ins_pipe( pipe_slow ); |
|
9154 |
%} |
|
9155 |
||
9156 |
instruct vxor64B_mem(vecZ dst, vecZ src, memory mem) %{ |
|
9157 |
predicate(UseAVX > 2 && n->as_Vector()->length_in_bytes() == 64); |
|
9158 |
match(Set dst (XorV src (LoadVector mem))); |
|
9159 |
format %{ "vpxor $dst,$src,$mem\t! xor vectors (64 bytes)" %} |
|
9160 |
ins_encode %{ |
|
9161 |
int vector_len = 2; |
|
9162 |
__ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector_len); |
|
9163 |
%} |
|
9164 |
ins_pipe( pipe_slow ); |
|
9165 |
%} |
|
9166 |
||
54750 | 9167 |
// --------------------------------- ABS -------------------------------------- |
9168 |
// a = |a| |
|
9169 |
instruct vabs4B_reg(vecS dst, vecS src) %{ |
|
9170 |
predicate(UseSSE > 2 && n->as_Vector()->length() == 4); |
|
9171 |
match(Set dst (AbsVB src)); |
|
9172 |
format %{ "pabsb $dst,$src\t# $dst = |$src| abs packed4B" %} |
|
9173 |
ins_encode %{ |
|
9174 |
__ pabsb($dst$$XMMRegister, $src$$XMMRegister); |
|
9175 |
%} |
|
9176 |
ins_pipe( pipe_slow ); |
|
9177 |
%} |
|
9178 |
||
9179 |
instruct vabs8B_reg(vecD dst, vecD src) %{ |
|
9180 |
predicate(UseSSE > 2 && n->as_Vector()->length() == 8); |
|
9181 |
match(Set dst (AbsVB src)); |
|
9182 |
format %{ "pabsb $dst,$src\t# $dst = |$src| abs packed8B" %} |
|
9183 |
ins_encode %{ |
|
9184 |
__ pabsb($dst$$XMMRegister, $src$$XMMRegister); |
|
9185 |
%} |
|
9186 |
ins_pipe( pipe_slow ); |
|
9187 |
%} |
|
9188 |
||
9189 |
instruct vabs16B_reg(vecX dst, vecX src) %{ |
|
9190 |
predicate(UseSSE > 2 && n->as_Vector()->length() == 16); |
|
9191 |
match(Set dst (AbsVB src)); |
|
9192 |
format %{ "pabsb $dst,$src\t# $dst = |$src| abs packed16B" %} |
|
9193 |
ins_encode %{ |
|
9194 |
__ pabsb($dst$$XMMRegister, $src$$XMMRegister); |
|
9195 |
%} |
|
9196 |
ins_pipe( pipe_slow ); |
|
9197 |
%} |
|
9198 |
||
9199 |
instruct vabs32B_reg(vecY dst, vecY src) %{ |
|
9200 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 32); |
|
9201 |
match(Set dst (AbsVB src)); |
|
9202 |
format %{ "vpabsb $dst,$src\t# $dst = |$src| abs packed32B" %} |
|
9203 |
ins_encode %{ |
|
9204 |
int vector_len = 1; |
|
9205 |
__ vpabsb($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
|
9206 |
%} |
|
9207 |
ins_pipe( pipe_slow ); |
|
9208 |
%} |
|
9209 |
||
9210 |
instruct vabs64B_reg(vecZ dst, vecZ src) %{ |
|
9211 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 64); |
|
9212 |
match(Set dst (AbsVB src)); |
|
9213 |
format %{ "vpabsb $dst,$src\t# $dst = |$src| abs packed64B" %} |
|
9214 |
ins_encode %{ |
|
9215 |
int vector_len = 2; |
|
9216 |
__ vpabsb($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
|
9217 |
%} |
|
9218 |
ins_pipe( pipe_slow ); |
|
9219 |
%} |
|
9220 |
||
9221 |
instruct vabs2S_reg(vecD dst, vecD src) %{ |
|
9222 |
predicate(UseSSE > 2 && n->as_Vector()->length() == 2); |
|
9223 |
match(Set dst (AbsVS src)); |
|
9224 |
format %{ "pabsw $dst,$src\t# $dst = |$src| abs packed2S" %} |
|
9225 |
ins_encode %{ |
|
9226 |
__ pabsw($dst$$XMMRegister, $src$$XMMRegister); |
|
9227 |
%} |
|
9228 |
ins_pipe( pipe_slow ); |
|
9229 |
%} |
|
9230 |
||
9231 |
instruct vabs4S_reg(vecD dst, vecD src) %{ |
|
9232 |
predicate(UseSSE > 2 && n->as_Vector()->length() == 4); |
|
9233 |
match(Set dst (AbsVS src)); |
|
9234 |
format %{ "pabsw $dst,$src\t# $dst = |$src| abs packed4S" %} |
|
9235 |
ins_encode %{ |
|
9236 |
__ pabsw($dst$$XMMRegister, $src$$XMMRegister); |
|
9237 |
%} |
|
9238 |
ins_pipe( pipe_slow ); |
|
9239 |
%} |
|
9240 |
||
9241 |
instruct vabs8S_reg(vecX dst, vecX src) %{ |
|
9242 |
predicate(UseSSE > 2 && n->as_Vector()->length() == 8); |
|
9243 |
match(Set dst (AbsVS src)); |
|
9244 |
format %{ "pabsw $dst,$src\t# $dst = |$src| abs packed8S" %} |
|
9245 |
ins_encode %{ |
|
9246 |
__ pabsw($dst$$XMMRegister, $src$$XMMRegister); |
|
9247 |
%} |
|
9248 |
ins_pipe( pipe_slow ); |
|
9249 |
%} |
|
9250 |
||
9251 |
instruct vabs16S_reg(vecY dst, vecY src) %{ |
|
9252 |
predicate(UseAVX > 1 && n->as_Vector()->length() == 16); |
|
9253 |
match(Set dst (AbsVS src)); |
|
9254 |
format %{ "vpabsw $dst,$src\t# $dst = |$src| abs packed16S" %} |
|
9255 |
ins_encode %{ |
|
9256 |
int vector_len = 1; |
|
9257 |
__ vpabsw($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
|
9258 |
%} |
|
9259 |
ins_pipe( pipe_slow ); |
|
9260 |
%} |
|
9261 |
||
9262 |
instruct vabs32S_reg(vecZ dst, vecZ src) %{ |
|
9263 |
predicate(UseAVX > 2 && n->as_Vector()->length() == 32); |
|
9264 |
match(Set dst (AbsVS src)); |
|
9265 |
format %{ "vpabsw $dst,$src\t# $dst = |$src| abs packed32S" %} |
|
9266 |
ins_encode %{ |
|
9267 |
int vector_len = 2; |
|
9268 |
__ vpabsw($dst$$XMMRegister, $src$$XMMRegister, vector_len); |
|
9269 |
%} |
|
9270 |
ins_pipe( pipe_slow ); |
|
9271 |
%} |
|
9272 |
||
9273 |
instruct vabs2I_reg(vecD dst, vecD src) %{ |
|
9274 |
predicate(UseSSE > 2 && n->as_Vector()->length() == 2); |
|
9275 |
match(Set dst (AbsVI src)); |
|
9276 |
format %{ "pabsd $dst,$src\t# $dst = |$src| abs packed2I" %} |
|
9277 |
ins_encode %{ |
|
9278 |
__ pabsd($dst$$XMMRegister, $src$$XMMRegister); |
|
9279 |
%} |
|
9280 |
ins_pipe( pipe_slow ); |
|
9281 |
%} |
|
9282 |
||
9283 |
instruct vabs4I_reg(vecX dst, vecX src) %{ |
|
9284 |
predicate(UseSSE > 2 && n->as_Vector()->length() == 4); |
|
9285 |
match(Set dst (AbsVI src)); |
|
9286 |
format %{ "pabsd $dst,$src\t# $dst = |$src| abs packed4I" %} |
|
9287 |
ins_encode %{ |
|
9288 |
__ pabsd($dst$$XMMRegister, $src$$XMMRegister); |
|
9289 |
%} |
|
9290 |
ins_pipe( pipe_slow ); |
|
9291 |
%} |
|
9292 |
||
9293 |
// Absolute value of 8 packed ints (256-bit ymm).
// Fix: the 256-bit form of vpabsd is an AVX2 instruction, so the predicate
// must require UseAVX > 1 (AVX2), not UseAVX > 0 (AVX1). This matches the
// guard used by vabs16S_reg above for the equally AVX2-only 256-bit vpabsw.
instruct vabs8I_reg(vecY dst, vecY src) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (AbsVI src));
  format %{ "vpabsd $dst,$src\t# $dst = |$src| abs packed8I" %}
  ins_encode %{
    int vector_len = 1; // 256-bit operation
    __ vpabsd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
9303 |
||
9304 |
// Absolute value of 16 packed ints (512-bit zmm, EVEX-encoded vpabsd);
// requires AVX-512 (UseAVX > 2).
instruct vabs16I_reg(vecZ dst, vecZ src) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (AbsVI src));
  format %{ "vpabsd $dst,$src\t# $dst = |$src| abs packed16I" %}
  ins_encode %{
    int vector_len = 2; // 512-bit operation
    __ vpabsd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
9314 |
||
9315 |
// Absolute value of packed longs. evpabsq exists only with EVEX encoding,
// so all three widths (128/256/512-bit) require AVX-512 (UseAVX > 2); the
// vector_len selects xmm, ymm or zmm operation.
instruct vabs2L_reg(vecX dst, vecX src) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 2);
  match(Set dst (AbsVL src));
  format %{ "evpabsq $dst,$src\t# $dst = |$src| abs packed2L" %}
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ evpabsq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

instruct vabs4L_reg(vecY dst, vecY src) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 4);
  match(Set dst (AbsVL src));
  format %{ "evpabsq $dst,$src\t# $dst = |$src| abs packed4L" %}
  ins_encode %{
    int vector_len = 1; // 256-bit operation
    __ evpabsq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

instruct vabs8L_reg(vecZ dst, vecZ src) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (AbsVL src));
  format %{ "evpabsq $dst,$src\t# $dst = |$src| abs packed8L" %}
  ins_encode %{
    int vector_len = 2; // 512-bit operation
    __ evpabsq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
9347 |
||
9348 |
// --------------------------------- ABSNEG --------------------------------------
// Each instruct below matches BOTH AbsV and NegV; ins_encode reads the ideal
// opcode of the matched node and passes it to the vabsnegd/vabsnegf macro-
// assembler helper, which applies the corresponding sign-bit mask. The
// scratch register is a TEMP used by the helper (mask operand materialization).

// Absolute value / negation of 2 packed doubles (128-bit, SSE2 path).
instruct vabsneg2D(vecX dst, vecX src, rRegI scratch) %{
  predicate(UseSSE >= 2 && n->as_Vector()->length() == 2);
  match(Set dst (AbsVD src));
  match(Set dst (NegVD src));
  effect(TEMP scratch);
  format %{ "vabsnegd $dst,$src,[mask]\t# absneg packed2D" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // Op_AbsVD or Op_NegVD
    // The SSE helper operates in place on dst, so copy src first if needed.
    if ($dst$$XMMRegister != $src$$XMMRegister)
      __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
    __ vabsnegd(opcode, $dst$$XMMRegister, $scratch$$Register);
  %}
  ins_pipe( pipe_slow );
%}

// Absolute value / negation of 4 packed doubles (256-bit, AVX path:
// three-operand helper, no explicit copy needed).
instruct vabsneg4D(vecY dst, vecY src, rRegI scratch) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AbsVD src));
  match(Set dst (NegVD src));
  effect(TEMP scratch);
  format %{ "vabsnegd $dst,$src,[mask]\t# absneg packed4D" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode();
    int vector_len = 1; // 256-bit operation
    __ vabsnegd(opcode, $dst$$XMMRegister, $src$$XMMRegister, vector_len, $scratch$$Register);
  %}
  ins_pipe( pipe_slow );
%}

// Absolute value / negation of 8 packed doubles (512-bit, AVX-512).
instruct vabsneg8D(vecZ dst, vecZ src, rRegI scratch) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (AbsVD src));
  match(Set dst (NegVD src));
  effect(TEMP scratch);
  format %{ "vabsnegd $dst,$src,[mask]\t# absneg packed8D" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode();
    int vector_len = 2; // 512-bit operation
    __ vabsnegd(opcode, $dst$$XMMRegister, $src$$XMMRegister, vector_len, $scratch$$Register);
  %}
  ins_pipe( pipe_slow );
%}
|
9392 |
||
9393 |
// Absolute value / negation of 2 packed floats (low 64 bits; SSE path,
// in-place helper so src is copied to dst first when they differ).
instruct vabsneg2F(vecD dst, vecD src, rRegI scratch) %{
  predicate(UseSSE > 0 && n->as_Vector()->length() == 2);
  match(Set dst (AbsVF src));
  match(Set dst (NegVF src));
  effect(TEMP scratch);
  format %{ "vabsnegf $dst,$src,[mask]\t# absneg packed2F" %}
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode(); // Op_AbsVF or Op_NegVF
    if ($dst$$XMMRegister != $src$$XMMRegister)
      __ movdqu($dst$$XMMRegister, $src$$XMMRegister);
    __ vabsnegf(opcode, $dst$$XMMRegister, $scratch$$Register);
  %}
  ins_pipe( pipe_slow );
%}

// Absolute value / negation of 4 packed floats, in place: note the matches
// use dst as both source and destination, so no copy is required.
instruct vabsneg4F(vecX dst, rRegI scratch) %{
  predicate(UseSSE > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AbsVF dst));
  match(Set dst (NegVF dst));
  effect(TEMP scratch);
  format %{ "vabsnegf $dst,[mask]\t# absneg packed4F" %}
  ins_cost(150);
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode();
    __ vabsnegf(opcode, $dst$$XMMRegister, $scratch$$Register);
  %}
  ins_pipe( pipe_slow );
%}

// Absolute value / negation of 8 packed floats (256-bit, AVX three-operand).
instruct vabsneg8F(vecY dst, vecY src, rRegI scratch) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (AbsVF src));
  match(Set dst (NegVF src));
  effect(TEMP scratch);
  format %{ "vabsnegf $dst,$src,[mask]\t# absneg packed8F" %}
  ins_cost(150);
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode();
    int vector_len = 1; // 256-bit operation
    __ vabsnegf(opcode, $dst$$XMMRegister, $src$$XMMRegister, vector_len, $scratch$$Register);
  %}
  ins_pipe( pipe_slow );
%}

// Absolute value / negation of 16 packed floats (512-bit, AVX-512).
instruct vabsneg16F(vecZ dst, vecZ src, rRegI scratch) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (AbsVF src));
  match(Set dst (NegVF src));
  effect(TEMP scratch);
  format %{ "vabsnegf $dst,$src,[mask]\t# absneg packed16F" %}
  ins_cost(150);
  ins_encode %{
    int opcode = this->as_Mach()->ideal_Opcode();
    int vector_len = 2; // 512-bit operation
    __ vabsnegf(opcode, $dst$$XMMRegister, $src$$XMMRegister, vector_len, $scratch$$Register);
  %}
  ins_pipe( pipe_slow );
%}
|
9451 |
||
46528 | 9452 |
// --------------------------------- FMA --------------------------------------
// Fused multiply-add of packed doubles: c = a * b + c. In every match, c is
// both an input and the result register; the _mem variants fold the second
// multiplicand from memory (LoadVector b). All variants require UseFMA.

// a * b + c
instruct vfma2D_reg(vecX a, vecX b, vecX c) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set c (FmaVD c (Binary a b)));
  format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed2D" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// a * b + c, with b loaded from memory
instruct vfma2D_mem(vecX a, memory b, vecX c) %{
  predicate(UseFMA && n->as_Vector()->length() == 2);
  match(Set c (FmaVD c (Binary a (LoadVector b))));
  format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed2D" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}


// a * b + c
instruct vfma4D_reg(vecY a, vecY b, vecY c) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set c (FmaVD c (Binary a b)));
  format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed4D" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 1; // 256-bit operation
    __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// a * b + c, with b loaded from memory
instruct vfma4D_mem(vecY a, memory b, vecY c) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set c (FmaVD c (Binary a (LoadVector b))));
  format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed4D" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 1; // 256-bit operation
    __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// a * b + c
instruct vfma8D_reg(vecZ a, vecZ b, vecZ c) %{
  predicate(UseFMA && n->as_Vector()->length() == 8);
  match(Set c (FmaVD c (Binary a b)));
  format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed8D" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 2; // 512-bit operation
    __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// a * b + c, with b loaded from memory
instruct vfma8D_mem(vecZ a, memory b, vecZ c) %{
  predicate(UseFMA && n->as_Vector()->length() == 8);
  match(Set c (FmaVD c (Binary a (LoadVector b))));
  format %{ "fmapd $a,$b,$c\t# $c = $a * $b + $c fma packed8D" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 2; // 512-bit operation
    __ vfmad($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
9532 |
||
9533 |
// a * b + c |
|
9534 |
// Fused multiply-add of packed floats: c = a * b + c (c is input and result;
// _mem variants fold b from memory). All variants require UseFMA.

// a * b + c
instruct vfma4F_reg(vecX a, vecX b, vecX c) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set c (FmaVF c (Binary a b)));
  format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed4F" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// a * b + c, with b loaded from memory
instruct vfma4F_mem(vecX a, memory b, vecX c) %{
  predicate(UseFMA && n->as_Vector()->length() == 4);
  match(Set c (FmaVF c (Binary a (LoadVector b))));
  format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed4F" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// a * b + c
instruct vfma8F_reg(vecY a, vecY b, vecY c) %{
  predicate(UseFMA && n->as_Vector()->length() == 8);
  match(Set c (FmaVF c (Binary a b)));
  format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed8F" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 1; // 256-bit operation
    __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// a * b + c, with b loaded from memory
instruct vfma8F_mem(vecY a, memory b, vecY c) %{
  predicate(UseFMA && n->as_Vector()->length() == 8);
  match(Set c (FmaVF c (Binary a (LoadVector b))));
  format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed8F" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 1; // 256-bit operation
    __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// a * b + c
instruct vfma16F_reg(vecZ a, vecZ b, vecZ c) %{
  predicate(UseFMA && n->as_Vector()->length() == 16);
  match(Set c (FmaVF c (Binary a b)));
  format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed16F" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 2; // 512-bit operation
    __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// a * b + c, with b loaded from memory
instruct vfma16F_mem(vecZ a, memory b, vecZ c) %{
  predicate(UseFMA && n->as_Vector()->length() == 16);
  match(Set c (FmaVF c (Binary a (LoadVector b))));
  format %{ "fmaps $a,$b,$c\t# $c = $a * $b + $c fma packed16F" %}
  ins_cost(150);
  ins_encode %{
    int vector_len = 2; // 512-bit operation
    __ vfmaf($c$$XMMRegister, $a$$XMMRegister, $b$$Address, $c$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
49384 | 9610 |
|
52992 | 9611 |
// --------------------------------- Vector Multiply Add --------------------------------------
// MulAddVS2VI: pmaddwd-style multiply of adjacent pairs of shorts with
// pairwise add, producing half as many ints. The smuladd* forms are the
// two-operand SSE encodings (dst is also the first source, AVX disabled);
// the vmuladd* forms are the three-operand AVX encodings.

instruct smuladd4S2I_reg(vecD dst, vecD src1) %{
  predicate(UseSSE >= 2 && UseAVX == 0 && n->as_Vector()->length() == 2);
  match(Set dst (MulAddVS2VI dst src1));
  format %{ "pmaddwd $dst,$dst,$src1\t! muladd packed4Sto2I" %}
  ins_encode %{
    __ pmaddwd($dst$$XMMRegister, $src1$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct vmuladd4S2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
  match(Set dst (MulAddVS2VI src1 src2));
  format %{ "vpmaddwd $dst,$src1,$src2\t! muladd packed4Sto2I" %}
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ vpmaddwd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

instruct smuladd8S4I_reg(vecX dst, vecX src1) %{
  predicate(UseSSE >= 2 && UseAVX == 0 && n->as_Vector()->length() == 4);
  match(Set dst (MulAddVS2VI dst src1));
  format %{ "pmaddwd $dst,$dst,$src1\t! muladd packed8Sto4I" %}
  ins_encode %{
    __ pmaddwd($dst$$XMMRegister, $src1$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct vmuladd8S4I_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (MulAddVS2VI src1 src2));
  format %{ "vpmaddwd $dst,$src1,$src2\t! muladd packed8Sto4I" %}
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ vpmaddwd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// 256-bit vpmaddwd requires AVX2 (UseAVX > 1).
instruct vmuladd16S8I_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
  match(Set dst (MulAddVS2VI src1 src2));
  format %{ "vpmaddwd $dst,$src1,$src2\t! muladd packed16Sto8I" %}
  ins_encode %{
    int vector_len = 1; // 256-bit operation
    __ vpmaddwd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

// 512-bit vpmaddwd requires AVX-512 (UseAVX > 2).
instruct vmuladd32S16I_reg(vecZ dst, vecZ src1, vecZ src2) %{
  predicate(UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (MulAddVS2VI src1 src2));
  format %{ "vpmaddwd $dst,$src1,$src2\t! muladd packed32Sto16I" %}
  ins_encode %{
    int vector_len = 2; // 512-bit operation
    __ vpmaddwd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}
|
9676 |
||
9677 |
// --------------------------------- Vector Multiply Add Add ----------------------------------
// Fuses MulAddVS2VI followed by AddVI into a single evpdpwssd (AVX512_VNNI
// dot-product instruction); dst acts as the accumulator. The low ins_cost(10)
// makes the matcher prefer these fused forms over the separate
// vpmaddwd + vpaddd instructs when VNNI is available.

instruct vmuladdadd4S2I_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(VM_Version::supports_vnni() && UseAVX > 2 && n->as_Vector()->length() == 2);
  match(Set dst (AddVI (MulAddVS2VI src1 src2) dst));
  format %{ "evpdpwssd $dst,$src1,$src2\t! muladdadd packed4Sto2I" %}
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ evpdpwssd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
  ins_cost(10);
%}

instruct vmuladdadd8S4I_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(VM_Version::supports_vnni() && UseAVX > 2 && n->as_Vector()->length() == 4);
  match(Set dst (AddVI (MulAddVS2VI src1 src2) dst));
  format %{ "evpdpwssd $dst,$src1,$src2\t! muladdadd packed8Sto4I" %}
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ evpdpwssd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
  ins_cost(10);
%}

instruct vmuladdadd16S8I_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(VM_Version::supports_vnni() && UseAVX > 2 && n->as_Vector()->length() == 8);
  match(Set dst (AddVI (MulAddVS2VI src1 src2) dst));
  format %{ "evpdpwssd $dst,$src1,$src2\t! muladdadd packed16Sto8I" %}
  ins_encode %{
    int vector_len = 1; // 256-bit operation
    __ evpdpwssd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
  ins_cost(10);
%}

instruct vmuladdadd32S16I_reg(vecZ dst, vecZ src1, vecZ src2) %{
  predicate(VM_Version::supports_vnni() && UseAVX > 2 && n->as_Vector()->length() == 16);
  match(Set dst (AddVI (MulAddVS2VI src1 src2) dst));
  format %{ "evpdpwssd $dst,$src1,$src2\t! muladdadd packed32Sto16I" %}
  ins_encode %{
    int vector_len = 2; // 512-bit operation
    __ evpdpwssd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
  ins_cost(10);
%}
9726 |
||
49384 | 9727 |
// --------------------------------- PopCount --------------------------------------
// Per-element population count of packed ints via vpopcntd; all widths
// require the AVX512_VPOPCNTDQ feature (VM_Version::supports_vpopcntdq())
// plus the UsePopCountInstruction flag.

instruct vpopcount2I(vecD dst, vecD src) %{
  predicate(VM_Version::supports_vpopcntdq() && UsePopCountInstruction && n->as_Vector()->length() == 2);
  match(Set dst (PopCountVI src));
  format %{ "vpopcntd $dst,$src\t! vector popcount packed2I" %}
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ vpopcntd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

instruct vpopcount4I(vecX dst, vecX src) %{
  predicate(VM_Version::supports_vpopcntdq() && UsePopCountInstruction && n->as_Vector()->length() == 4);
  match(Set dst (PopCountVI src));
  format %{ "vpopcntd $dst,$src\t! vector popcount packed4I" %}
  ins_encode %{
    int vector_len = 0; // 128-bit operation
    __ vpopcntd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

instruct vpopcount8I(vecY dst, vecY src) %{
  predicate(VM_Version::supports_vpopcntdq() && UsePopCountInstruction && n->as_Vector()->length() == 8);
  match(Set dst (PopCountVI src));
  format %{ "vpopcntd $dst,$src\t! vector popcount packed8I" %}
  ins_encode %{
    int vector_len = 1; // 256-bit operation
    __ vpopcntd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}

instruct vpopcount16I(vecZ dst, vecZ src) %{
  predicate(VM_Version::supports_vpopcntdq() && UsePopCountInstruction && n->as_Vector()->length() == 16);
  match(Set dst (PopCountVI src));
  format %{ "vpopcntd $dst,$src\t! vector popcount packed16I" %}
  ins_encode %{
    int vector_len = 2; // 512-bit operation
    __ vpopcntd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
  %}
  ins_pipe( pipe_slow );
%}