author       neliasso
date         Tue, 01 Oct 2019 11:43:10 +0200
changeset    58421:6fc57e391539
parent       57804:9b7b9f16dfd9
children     58679:9c3209ff7550, 58682:9f5b92d5a1b2
permissions  -rw-r--r--

/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1; // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

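  // A minimal usage sketch (illustrative only; register and offset choices are
  // hypothetical): when the field offset is small and known, null_check emits
  // nothing and the access itself traps on the protected first page, e.g.
  //
  //   __ null_check(rbx, 8);             // field at a small, known offset
  //   __ movl(rax, Address(rbx, 8));     // this load provides the implicit trap
  //
  // With an unknown offset, the default -1 makes needs_explicit_null_check()
  // answer true, so an explicit test of the register is emitted instead.
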
  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
        op == 0xE9 /* jmp */ ||
        op == 0xEB /* short jmp */ ||
        (op & 0xF0) == 0x70 /* short jcc */ ||
        op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
        op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
        "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d", file, line);
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

#ifdef COMPILER2
  // special instructions for EVEX
  void setvectmask(Register dst, Register src);
  void restorevectmask();
#endif

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

void call_VM(Register oop_result, |
|
219 |
address entry_point, |
|
220 |
bool check_exceptions = true); |
|
221 |
void call_VM(Register oop_result, |
|
222 |
address entry_point, |
|
223 |
Register arg_1, |
|
224 |
bool check_exceptions = true); |
|
225 |
void call_VM(Register oop_result, |
|
226 |
address entry_point, |
|
227 |
Register arg_1, Register arg_2, |
|
228 |
bool check_exceptions = true); |
|
229 |
void call_VM(Register oop_result, |
|
230 |
address entry_point, |
|
231 |
Register arg_1, Register arg_2, Register arg_3, |
|
232 |
bool check_exceptions = true); |
|
233 |
||
234 |
// Overloadings with last_Java_sp |
|
235 |
void call_VM(Register oop_result, |
|
236 |
Register last_java_sp, |
|
237 |
address entry_point, |
|
238 |
int number_of_arguments = 0, |
|
239 |
bool check_exceptions = true); |
|
240 |
void call_VM(Register oop_result, |
|
241 |
Register last_java_sp, |
|
242 |
address entry_point, |
|
243 |
Register arg_1, bool |
|
244 |
check_exceptions = true); |
|
245 |
void call_VM(Register oop_result, |
|
246 |
Register last_java_sp, |
|
247 |
address entry_point, |
|
248 |
Register arg_1, Register arg_2, |
|
249 |
bool check_exceptions = true); |
|
250 |
void call_VM(Register oop_result, |
|
251 |
Register last_java_sp, |
|
252 |
address entry_point, |
|
253 |
Register arg_1, Register arg_2, Register arg_3, |
|
254 |
bool check_exceptions = true); |
|
255 |
||
256 |
void get_vm_result (Register oop_result, Register thread); |
|
257 |
void get_vm_result_2(Register metadata_result, Register thread); |
|
258 |
||
259 |
// These always tightly bind to MacroAssembler::call_VM_base |
|
260 |
// bypassing the virtual implementation |
|
261 |
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); |
|
262 |
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); |
|
263 |
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); |
|
264 |
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); |
|
265 |
void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true); |
|
266 |
||
38699
f8bec5f6b09c
8154473: Update for CompilerDirectives to control stub generation and intrinsics
vdeshpande
parents:
38241
diff
changeset
|
267 |
void call_VM_leaf0(address entry_point); |
14626 | 268 |
void call_VM_leaf(address entry_point, |
269 |
int number_of_arguments = 0); |
|
270 |
void call_VM_leaf(address entry_point, |
|
271 |
Register arg_1); |
|
272 |
void call_VM_leaf(address entry_point, |
|
273 |
Register arg_1, Register arg_2); |
|
274 |
void call_VM_leaf(address entry_point, |
|
275 |
Register arg_1, Register arg_2, Register arg_3); |
|
276 |
||
277 |
// These always tightly bind to MacroAssembler::call_VM_leaf_base |
|
278 |
// bypassing the virtual implementation |
|
279 |
void super_call_VM_leaf(address entry_point); |
|
280 |
void super_call_VM_leaf(address entry_point, Register arg_1); |
|
281 |
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); |
|
282 |
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); |
|
283 |
void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4); |
|
284 |
||
285 |
// last Java Frame (fills frame anchor) |
|
286 |
void set_last_Java_frame(Register thread, |
|
287 |
Register last_java_sp, |
|
288 |
Register last_java_fp, |
|
289 |
address last_java_pc); |
|
290 |
||
291 |
// thread in the default location (r15_thread on 64bit) |
|
292 |
void set_last_Java_frame(Register last_java_sp, |
|
293 |
Register last_java_fp, |
|
294 |
address last_java_pc); |
|
295 |
||
40644
39e631ed7145
8161598: Kitchensink fails: assert(nm->insts_contains(original_pc)) failed: original PC must be in nmethod/CompiledMethod
dlong
parents:
38699
diff
changeset
|
296 |
void reset_last_Java_frame(Register thread, bool clear_fp); |
14626 | 297 |
|
298 |
// thread in the default location (r15_thread on 64bit) |
|
40644
39e631ed7145
8161598: Kitchensink fails: assert(nm->insts_contains(original_pc)) failed: original PC must be in nmethod/CompiledMethod
dlong
parents:
38699
diff
changeset
|
299 |
void reset_last_Java_frame(bool clear_fp); |
14626 | 300 |
|
49748 | 301 |
// jobjects |
44406
a46a6c4d1dd9
8176100: [REDO][REDO] G1 Needs pre barrier on dereference of weak JNI handles
mgerdin
parents:
44093
diff
changeset
|
302 |
void clear_jweak_tag(Register possibly_jweak); |
49748 | 303 |
void resolve_jobject(Register value, Register thread, Register tmp); |
14626 | 304 |
|
305 |
// C 'boolean' to Java boolean: x == 0 ? 0 : 1 |
|
306 |
void c2bool(Register x); |
|
307 |
||
308 |
// C++ bool manipulation |
|
309 |
||
310 |
void movbool(Register dst, Address src); |
|
311 |
void movbool(Address dst, bool boolconst); |
|
312 |
void movbool(Address dst, Register src); |
|
313 |
void testbool(Register dst); |
|
314 |
||
49816 | 315 |
void resolve_oop_handle(Register result, Register tmp = rscratch2); |
54839
e9db10a375d9
8222841: Incorrect static call stub interactions with class unloading
eosterlund
parents:
54750
diff
changeset
|
316 |
void resolve_weak_handle(Register result, Register tmp); |
49816 | 317 |
void load_mirror(Register mirror, Register method, Register tmp = rscratch2); |
54839
e9db10a375d9
8222841: Incorrect static call stub interactions with class unloading
eosterlund
parents:
54750
diff
changeset
|
318 |
void load_method_holder_cld(Register rresult, Register rmethod); |
38074
8475fdc6dcc3
8154580: Save mirror in interpreter frame to enable cleanups of CLDClosure
coleenp
parents:
38049
diff
changeset
|
319 |
|
55105
9ad765641e8f
8223213: Implement fast class initialization checks on x86-64
vlivanov
parents:
54839
diff
changeset
|
320 |
void load_method_holder(Register holder, Register method); |
9ad765641e8f
8223213: Implement fast class initialization checks on x86-64
vlivanov
parents:
54839
diff
changeset
|
321 |
|
14626 | 322 |
// oop manipulations |
323 |
void load_klass(Register dst, Register src); |
|
324 |
void store_klass(Register dst, Register src); |
|
325 |
||
49748 | 326 |
void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, |
327 |
Register tmp1, Register thread_tmp); |
|
328 |
void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src, |
|
329 |
Register tmp1, Register tmp2); |
|
330 |
||
51350 | 331 |
// Resolves obj access. Result is placed in the same register. |
332 |
// All other registers are preserved. |
|
333 |
void resolve(DecoratorSet decorators, Register obj); |
|
334 |
||
49748 | 335 |
void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, |
336 |
Register thread_tmp = noreg, DecoratorSet decorators = 0); |
|
337 |
void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg, |
|
338 |
Register thread_tmp = noreg, DecoratorSet decorators = 0); |
|
339 |
void store_heap_oop(Address dst, Register src, Register tmp1 = noreg, |
|
340 |
Register tmp2 = noreg, DecoratorSet decorators = 0); |
|
14626 | 341 |
|
342 |
// Used for storing NULL. All other oop constants should be |
|
343 |
// stored using routines that take a jobject. |
|
344 |
void store_heap_oop_null(Address dst); |
|
345 |
||
346 |
void load_prototype_header(Register dst, Register src); |
|
347 |
||
348 |
#ifdef _LP64 |
|
349 |
void store_klass_gap(Register dst, Register src); |
|
350 |
||
351 |
// This dummy is to prevent a call to store_heap_oop from |
|
352 |
// converting a zero (like NULL) into a Register by giving |
|
353 |
// the compiler two choices it can't resolve |
|
354 |
||
355 |
void store_heap_oop(Address dst, void* dummy); |
|
356 |
||
357 |
void encode_heap_oop(Register r); |
|
358 |
void decode_heap_oop(Register r); |
|
359 |
void encode_heap_oop_not_null(Register r); |
|
360 |
void decode_heap_oop_not_null(Register r); |
|
361 |
void encode_heap_oop_not_null(Register dst, Register src); |
|
362 |
void decode_heap_oop_not_null(Register dst, Register src); |
|
363 |
||
364 |
void set_narrow_oop(Register dst, jobject obj); |
|
365 |
void set_narrow_oop(Address dst, jobject obj); |
|
366 |
void cmp_narrow_oop(Register dst, jobject obj); |
|
367 |
void cmp_narrow_oop(Address dst, jobject obj); |
|
368 |
||
369 |
void encode_klass_not_null(Register r); |
|
370 |
void decode_klass_not_null(Register r); |
|
371 |
void encode_klass_not_null(Register dst, Register src); |
|
372 |
void decode_klass_not_null(Register dst, Register src); |
|
373 |
void set_narrow_klass(Register dst, Klass* k); |
|
374 |
void set_narrow_klass(Address dst, Klass* k); |
|
375 |
void cmp_narrow_klass(Register dst, Klass* k); |
|
376 |
void cmp_narrow_klass(Address dst, Klass* k); |
|
377 |
||
19319
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
378 |
// Returns the byte size of the instructions generated by decode_klass_not_null() |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
379 |
// when compressed klass pointers are being used. |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
380 |
static int instr_size_for_decode_klass_not_null(); |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
381 |
|
14626 | 382 |
// if heap base register is used - reinit it with the correct value |
383 |
void reinit_heapbase(); |
|
384 |
||
385 |
DEBUG_ONLY(void verify_heapbase(const char* msg);) |
|
386 |
||
387 |
#endif // _LP64 |
|
388 |
||
389 |
// Int division/remainder for Java |
|
390 |
// (as idivl, but checks for special case as described in JVM spec.) |
|
391 |
// returns idivl instruction offset for implicit exception handling |
|
392 |
int corrected_idivl(Register reg); |
|
393 |
||
394 |
// Long division/remainder for Java |
|
395 |
// (as idivq, but checks for special case as described in JVM spec.) |
|
396 |
// returns idivq instruction offset for implicit exception handling |
|
397 |
int corrected_idivq(Register reg); |
|
398 |
||
399 |
void int3(); |
|
400 |
||
401 |
// Long operation macros for a 32bit cpu |
|
402 |
// Long negation for Java |
|
403 |
void lneg(Register hi, Register lo); |
|
404 |
||
405 |
// Long multiplication for Java |
|
406 |
// (destroys contents of eax, ebx, ecx and edx) |
|
407 |
void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y |
|
408 |
||
409 |
// Long shifts for Java |
|
410 |
// (semantics as described in JVM spec.) |
|
411 |
void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f) |
|
412 |
void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f) |
|
413 |
||
414 |
// Long compare for Java |
|
415 |
// (semantics as described in JVM spec.) |
|
416 |
void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y) |
|
417 |
||
418 |
||
419 |
// misc |
|
420 |
||
421 |
// Sign extension |
|
422 |
void sign_extend_short(Register reg); |
|
423 |
void sign_extend_byte(Register reg); |
|
424 |
||
425 |
// Division by power of 2, rounding towards 0 |
|
426 |
void division_with_shift(Register reg, int shift_value); |
|
427 |
||
428 |
// Compares the top-most stack entries on the FPU stack and sets the eflags as follows: |
|
429 |
// |
|
430 |
// CF (corresponds to C0) if x < y |
|
431 |
// PF (corresponds to C2) if unordered |
|
432 |
// ZF (corresponds to C3) if x = y |
|
433 |
// |
|
434 |
// The arguments are in reversed order on the stack (i.e., top of stack is first argument). |
|
435 |
// tmp is a temporary register, if none is available use noreg (only matters for non-P6 code) |
|
436 |
void fcmp(Register tmp); |
|
437 |
// Variant of the above which allows y to be further down the stack |
|
438 |
// and which only pops x and y if specified. If pop_right is |
|
439 |
// specified then pop_left must also be specified. |
|
440 |
void fcmp(Register tmp, int index, bool pop_left, bool pop_right); |
|
441 |
||
442 |
// Floating-point comparison for Java |
|
443 |
// Compares the top-most stack entries on the FPU stack and stores the result in dst. |
|
444 |
// The arguments are in reversed order on the stack (i.e., top of stack is first argument). |
|
445 |
// (semantics as described in JVM spec.) |
|
446 |
void fcmp2int(Register dst, bool unordered_is_less); |
|
447 |
// Variant of the above which allows y to be further down the stack |
|
448 |
// and which only pops x and y if specified. If pop_right is |
|
449 |
// specified then pop_left must also be specified. |
|
450 |
void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right); |
|
451 |
||
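  // A minimal usage sketch (illustrative only; the label is hypothetical):
  // fcmp2int leaves -1, 0 or +1 in dst, so a bytecode-style comparison can
  // branch on the materialized result, e.g.
  //
  //   __ fcmp2int(rax, true /* unordered_is_less */);
  //   __ testl(rax, rax);
  //   __ jcc(Assembler::less, L_less_or_unordered);
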
  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Pop ST (ffree & fincstp combined)
  void fpop();

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  void push_fTOS();

  // pops double TOS element from CPU stack and pushes on FPU stack
  void pop_fTOS();

  void empty_FPU_stack();

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a power of two
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();

  // allocation
  void eden_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

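  // A minimal sketch of how the two-part check is typically wired together
  // (illustrative only; registers and labels are hypothetical):
  //
  //   Label L_ok, L_fail;
  //   __ check_klass_subtype_fast_path(rsi, rax, rcx, &L_ok, &L_fail, NULL);
  //   // fall-through here means "maybe"; the slow path finishes the decision
  //   __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, &L_ok, NULL);
  //   __ bind(L_fail);   // slow-path failure falls through and joins L_fail
  //   ...
  //   __ bind(L_ok);
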
  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  // TODO: Make these macros with file and line like sparc version!
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // If thread_reg is != noreg the code assumes the register passed contains
  // the thread (required on 64 bit).
  void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);

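  // A minimal usage sketch (illustrative only; the label is hypothetical), as it
  // would appear on 64-bit where r15_thread already holds the current thread:
  //
  //   Label slow_path;
  //   __ safepoint_poll(slow_path, r15_thread, rscratch1);
  //   ...   // fast path continues; 'slow_path' branches to code that calls
  //         // into the runtime to honor the pending safepoint/handshake.
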
  void verify_tlab();

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
#ifdef COMPILER2
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 BiasedLockingCounters* counters,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
#if INCLUDE_RTM_OPT
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif
#endif

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

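  // A minimal usage sketch (illustrative only; the label is hypothetical): the
  // *ptr forms below select the 64-bit or 32-bit instruction at build time, so
  // shared code can be written once for both word sizes, e.g.
  //
  //   __ addptr(rsp, 2 * wordSize);   // addq on LP64, addl on 32-bit
  //   __ cmpptr(rax, rbx);
  //   __ jcc(Assembler::notEqual, L_mismatch);
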
725 |
// Arithmetics |
|
726 |
||
727 |
||
728 |
void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; } |
|
729 |
void addptr(Address dst, Register src); |
|
730 |
||
731 |
void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); } |
|
732 |
void addptr(Register dst, int32_t src); |
|
733 |
void addptr(Register dst, Register src); |
|
734 |
void addptr(Register dst, RegisterOrConstant src) { |
|
735 |
if (src.is_constant()) addptr(dst, (int) src.as_constant()); |
|
736 |
else addptr(dst, src.as_register()); |
|
737 |
} |
|
738 |
||
739 |
void andptr(Register dst, int32_t src); |
|
740 |
void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; } |
|
741 |
||
742 |
void cmp8(AddressLiteral src1, int imm); |
|
743 |
||
744 |
// renamed to drag out the casting of address to int32_t/intptr_t |
|
745 |
void cmp32(Register src1, int32_t imm); |
|
746 |
||
747 |
void cmp32(AddressLiteral src1, int32_t imm); |
|
748 |
// compare reg - mem, or reg - &mem |
|
749 |
void cmp32(Register src1, AddressLiteral src2); |
|
750 |
||
751 |
void cmp32(Register src1, Address src2); |
|
752 |
||
753 |
#ifndef _LP64 |
|
754 |
void cmpklass(Address dst, Metadata* obj); |
|
755 |
void cmpklass(Register dst, Metadata* obj); |
|
756 |
void cmpoop(Address dst, jobject obj); |
|
50536
8434981a4137
8203157: Object equals abstraction for BarrierSetAssembler
rkennke
parents:
50534
diff
changeset
|
757 |
void cmpoop_raw(Address dst, jobject obj); |
47683
f433d49aceb4
8184914: Use MacroAssembler::cmpoop() consistently when comparing heap objects
rkennke
parents:
47216
diff
changeset
|
758 |
#endif // _LP64 |
f433d49aceb4
8184914: Use MacroAssembler::cmpoop() consistently when comparing heap objects
rkennke
parents:
47216
diff
changeset
|
759 |
|
f433d49aceb4
8184914: Use MacroAssembler::cmpoop() consistently when comparing heap objects
rkennke
parents:
47216
diff
changeset
|
760 |
void cmpoop(Register src1, Register src2); |
f433d49aceb4
8184914: Use MacroAssembler::cmpoop() consistently when comparing heap objects
rkennke
parents:
47216
diff
changeset
|
761 |
void cmpoop(Register src1, Address src2); |
14626 | 762 |
void cmpoop(Register dst, jobject obj); |
50536
8434981a4137
8203157: Object equals abstraction for BarrierSetAssembler
rkennke
parents:
50534
diff
changeset
|
763 |
void cmpoop_raw(Register dst, jobject obj); |
14626 | 764 |
|
765 |
// NOTE src2 must be the lval. This is NOT an mem-mem compare |
|
766 |
void cmpptr(Address src1, AddressLiteral src2); |
|
767 |
||
768 |
void cmpptr(Register src1, AddressLiteral src2); |
|
769 |
||
770 |
void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } |
|
771 |
void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } |
|
772 |
// void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } |
|
773 |
||
774 |
void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } |
|
775 |
void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } |
|
776 |
||
777 |
// cmp64 to avoild hiding cmpq |
|
778 |
void cmp64(Register src1, AddressLiteral src); |
|
779 |
||
780 |
void cmpxchgptr(Register reg, Address adr); |
|
781 |
||
782 |
void locked_cmpxchgptr(Register reg, AddressLiteral adr); |
|
783 |
||
784 |
||
785 |
void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); } |
|
23491 | 786 |
void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); } |
14626 | 787 |
|
788 |
||
789 |
void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); } |
|
790 |
||
791 |
void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); } |
|
792 |
||
793 |
void shlptr(Register dst, int32_t shift); |
|
794 |
void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); } |
|
795 |
||
796 |
void shrptr(Register dst, int32_t shift); |
|
797 |
void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); } |
|
798 |
||
799 |
void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); } |
|
800 |
void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); } |
|
801 |
||
802 |
void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } |
|
803 |
||
804 |
void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } |
|
805 |
void subptr(Register dst, int32_t src); |
|
806 |
// Force generation of a 4 byte immediate value even if it fits into 8bit |
|
807 |
void subptr_imm32(Register dst, int32_t src); |
|
808 |
void subptr(Register dst, Register src); |
|
809 |
void subptr(Register dst, RegisterOrConstant src) { |
|
810 |
if (src.is_constant()) subptr(dst, (int) src.as_constant()); |
|
811 |
else subptr(dst, src.as_register()); |
|
812 |
} |
|
813 |
||
814 |
void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } |
|
815 |
void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } |
|
816 |
||
817 |
void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } |
|
818 |
void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } |
|
819 |
||
820 |
void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; } |
|
821 |
||
822 |
||
823 |
||
824 |
// Helper functions for statistics gathering. |
|
825 |
// Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes. |
|
826 |
void cond_inc32(Condition cond, AddressLiteral counter_addr); |
|
827 |
// Unconditional atomic increment. |
|
23491 | 828 |
void atomic_incl(Address counter_addr); |
829 |
void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1); |
|
830 |
#ifdef _LP64 |
|
831 |
void atomic_incq(Address counter_addr); |
|
832 |
void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1); |
|
833 |
#endif |
|
834 |
void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; } |
|
835 |
void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; } |
|
14626 | 836 |
|
837 |
void lea(Register dst, AddressLiteral adr); |
|
838 |
void lea(Address dst, AddressLiteral adr); |
|
839 |
void lea(Register dst, Address adr) { Assembler::lea(dst, adr); } |
|
840 |
||
841 |
void leal32(Register dst, Address src) { leal(dst, src); } |
|
842 |
||
843 |
// Import other testl() methods from the parent class or else |
|
844 |
// they will be hidden by the following overriding declaration. |
|
845 |
using Assembler::testl; |
|
846 |
void testl(Register dst, AddressLiteral src); |
|
847 |
||
848 |
void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } |
|
849 |
void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } |
|
850 |
void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } |
|
20702
bbe0fcde6e13
8023657: New type profiling points: arguments to call
roland
parents:
19319
diff
changeset
|
851 |
void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); } |
14626 | 852 |
|
853 |
void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } |
|
50103
b99e90f885bf
8202993: Add support for x86 testptr/testq with register and address
pliden
parents:
49816
diff
changeset
|
854 |
void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); } |
14626 | 855 |
void testptr(Register src1, Register src2); |
856 |
||
857 |
void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } |
|
858 |
void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } |
|
859 |
||
860 |
// Calls |
|
861 |
||
862 |
void call(Label& L, relocInfo::relocType rtype); |
|
863 |
void call(Register entry); |
|
864 |
||
38238
1bbcc430c78d
8151268: Wire up the x86 _vectorizedMismatch stub routine in C1
psandoz
parents:
38135
diff
changeset
|
865 |
// NOTE: this call transfers to the effective address of entry NOT |
14626 | 866 |
// the address contained by entry. This is because this is more natural |
867 |
// for jumps/calls. |
|
868 |
void call(AddressLiteral entry); |
|
869 |
||
870 |
// Emit the CompiledIC call idiom |
|
35086
bbf32241d851
8072008: Emit direct call instead of linkTo* for recursive indy/MH.invoke* calls
vlivanov
parents:
34203
diff
changeset
|
871 |
void ic_call(address entry, jint method_index = 0); |
14626 | 872 |
|
873 |
// Jumps |
|
874 |
||
875 |
// NOTE: these jumps tranfer to the effective address of dst NOT |
|
876 |
// the address contained by dst. This is because this is more natural |
|
877 |
// for jumps/calls. |
|
878 |
void jump(AddressLiteral dst); |
|
879 |
void jump_cc(Condition cc, AddressLiteral dst); |
|
880 |
||
881 |
// 32bit can do a case table jump in one instruction but we no longer allow the base |
|
882 |
// to be installed in the Address class. This jump will tranfers to the address |
|
883 |
// contained in the location described by entry (not the address of entry) |
|
884 |
void jump(ArrayAddress entry); |
|
885 |
||
886 |
// Floating |
|
887 |
||
888 |
void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } |
|
54750 | 889 |
void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); |
35540
e001ad24dcdb
8143353: update for x86 sin and cos in the math lib
vdeshpande
parents:
35146
diff
changeset
|
890 |
void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); } |
14626 | 891 |
|
892 |
void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); } |
|
893 |
void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); } |
|
54750 | 894 |
void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1); |
14626 | 895 |
|
896 |
void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); } |
|
897 |
void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } |
|
898 |
void comiss(XMMRegister dst, AddressLiteral src); |
|
899 |
||
900 |
void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); } |
|
901 |
void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } |
|
902 |
void comisd(XMMRegister dst, AddressLiteral src); |
|
903 |
||
904 |
void fadd_s(Address src) { Assembler::fadd_s(src); } |
|
905 |
void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } |
|
906 |
||
907 |
void fldcw(Address src) { Assembler::fldcw(src); } |
|
908 |
void fldcw(AddressLiteral src); |
|
909 |
||
910 |
void fld_s(int index) { Assembler::fld_s(index); } |
|
911 |
void fld_s(Address src) { Assembler::fld_s(src); } |
|
912 |
void fld_s(AddressLiteral src); |
|
913 |
||
914 |
void fld_d(Address src) { Assembler::fld_d(src); } |
|
915 |
void fld_d(AddressLiteral src); |
|
916 |
||
917 |
void fld_x(Address src) { Assembler::fld_x(src); } |
|
918 |
void fld_x(AddressLiteral src); |
|
919 |
||
920 |
void fmul_s(Address src) { Assembler::fmul_s(src); } |
|
921 |
void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } |
|
922 |
||
923 |
void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } |
|
924 |
void ldmxcsr(AddressLiteral src); |

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register  reg_old_h,
    Register  reg_a,
    Register  reg_b,
    Register  reg_c,
    Register  reg_d,
    Register  reg_e,
    Register  reg_f,
    Register  reg_g,
    Register  reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
        XMMRegister xmm_0,   /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
        XMMRegister xmm_1,   /* ymm5 */  /* full cycle is 16 iterations */
        XMMRegister xmm_2,   /* ymm6 */
        XMMRegister xmm_3,   /* ymm7 */
        Register    reg_a,   /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
        Register    reg_b,   /* ebx */   /* full cycle is 8 iterations */
        Register    reg_c,   /* edi */
        Register    reg_d,   /* esi */
        Register    reg_e,   /* r8d */
        Register    reg_f,   /* r9d */
        Register    reg_g,   /* r10d */
        Register    reg_h,   /* r11d */
        int iter);

  void addm(int disp, Register r1, Register r2);
  void gfmul(XMMRegister tmp0, XMMRegister t);
  void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
                     XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
  void generateHtbl_one_block(Register htbl);
  void generateHtbl_eight_blocks(Register htbl);
 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void avx_ghash(Register state, Register htbl, Register data, Register blocks);
#endif

#ifdef _LP64
 private:
  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
 private:
  void roundEnc(XMMRegister key, int rnum);
  void lastroundEnc(XMMRegister key, int rnum);
  void roundDec(XMMRegister key, int rnum);
  void lastroundDec(XMMRegister key, int rnum);
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);

 public:
  void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);

#endif

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifdef _LP64
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register r11);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
#else
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif

  void increase_precision();
  void restore_precision();

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);
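  //
  // A minimal usage sketch (illustrative only): callers go through the movflt/movdbl
  // wrappers declared elsewhere in this class, which pick the appropriate scalar
  // move for the operand kinds involved, e.g.
  //
  //   __ movflt(xmm0, Address(rsp, 0));    // load a float from a stack slot
  //   __ movdbl(Address(rsp, 8), xmm1);    // store a double to a stack slot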

 public:

  void addsd(XMMRegister dst, XMMRegister src)    { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)        { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, Address src)     { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }
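  //
  // Note (sketch): pclmulldq/pclmulhdq cover only the low*low and high*high halves.
  // A full 128x128-bit carry-less product also needs the two cross terms, which
  // callers emit directly as Assembler::pclmulqdq(dst, src, 0x01) and
  // Assembler::pclmulqdq(dst, src, 0x10).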

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode)     { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);
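  //
  // The rmode immediate follows the SSE4.1 ROUNDSD encoding: 0 = round to nearest
  // (even), 1 = round toward -infinity (floor), 2 = round toward +infinity (ceil).
  // For example (sketch only):
  //
  //   __ roundsd(xmm0, xmm1, 2);   // xmm0 = ceil(xmm1)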

  void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src)    { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)        { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src)    { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)        { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);
  // AVX 3-operands instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
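  //
  // Dispatch note (sketch): the 256-bit vpxor encoding requires AVX2, so on an
  // AVX1-only CPU the wrappers above fall back to vxorpd, which produces the same
  // bitwise result for a full-width XOR. For example:
  //
  //   __ vpxor(xmm0, xmm0, xmm1, Assembler::AVX_256bit);   // emits vxorpd when UseAVX == 1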

  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
  void vpxor(XMMRegister dst, Address src)     { Assembler::vpxor(dst, dst, src, true); }

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
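  //
  // Typical use (illustrative sketch only, register choices are hypothetical):
  // fold the upper 128-bit lane of a YMM accumulator into the lower lane before
  // finishing a reduction with scalar code:
  //
  //   __ vextracti128_high(xmm1, xmm0);                    // xmm1 = upper lane of ymm0
  //   __ vpxor(xmm0, xmm0, xmm1, Assembler::AVX_128bit);   // combine lanes (XOR-style fold)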
37293
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1408 |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
36555
diff
changeset
|
1409 |
void vinsertf128_high(XMMRegister dst, XMMRegister src) { |
37293
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1410 |
if (UseAVX > 2) { |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1411 |
Assembler::vinsertf32x4(dst, dst, src, 1); |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1412 |
} else { |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1413 |
Assembler::vinsertf128(dst, dst, src, 1); |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1414 |
} |
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
36555
diff
changeset
|
1415 |
} |
37293
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1416 |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
36555
diff
changeset
|
1417 |
void vinsertf128_high(XMMRegister dst, Address src) { |
37293
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1418 |
if (UseAVX > 2) { |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1419 |
Assembler::vinsertf32x4(dst, dst, src, 1); |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1420 |
} else { |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1421 |
Assembler::vinsertf128(dst, dst, src, 1); |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1422 |
} |
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
36555
diff
changeset
|
1423 |
} |
37293
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1424 |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
36555
diff
changeset
|
1425 |
void vextractf128_high(XMMRegister dst, XMMRegister src) { |
37293
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1426 |
if (UseAVX > 2) { |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1427 |
Assembler::vextractf32x4(dst, src, 1); |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1428 |
} else { |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1429 |
Assembler::vextractf128(dst, src, 1); |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1430 |
} |
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
36555
diff
changeset
|
1431 |
} |
37293
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1432 |
|
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
36555
diff
changeset
|
1433 |
void vextractf128_high(Address dst, XMMRegister src) { |
37293
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1434 |
if (UseAVX > 2) { |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1435 |
Assembler::vextractf32x4(dst, src, 1); |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1436 |
} else { |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1437 |
Assembler::vextractf128(dst, src, 1); |
c010188d360f
8151003: Remove nds->is_valid() checks from assembler_x86.cpp
mcberg
parents:
36561
diff
changeset
|
1438 |
} |
36561
b18243f4d955
8151002: Make Assembler methods vextract and vinsert match actual instructions
mikael
parents:
36555
diff
changeset
|
1439 |
} |

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
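
  // The 64x4 variants make it possible to spill and reload a full 512-bit ZMM
  // value as two 256-bit halves when a single 512-bit move is not convenient.
  // A minimal sketch, with a hypothetical stack layout and register choice:
  //
  //   vextractf64x4_low (Address(rsp, 0),  xmm8);  // save lower 256 bits
  //   vextractf64x4_high(Address(rsp, 32), xmm8);  // save upper 256 bits
  //   ...
  //   vinsertf64x4_low (xmm8, Address(rsp, 0));    // restore lower 256 bits
  //   vinsertf64x4_high(xmm8, Address(rsp, 32));   // restore upper 256 bits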

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
  void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x10 - multiply nds[0:63] and src[64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x10);
  }
  void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x01);
  }

  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
  }
  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
  }
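
  // The four quadrant helpers above are the building blocks of a full
  // 128 x 128 carry-less multiply (a 256-bit result), as used by the CRC32 and
  // GHASH stubs. A sketch of the usual combination; xa, xb and the xtmp
  // registers are hypothetical, and the real stubs interleave this with
  // folding steps:
  //
  //   vpclmulldq   (xtmp0, xa, xb);   // lo   = xa[63:0]   * xb[63:0]
  //   vpclmulhdq   (xtmp1, xa, xb);   // hi   = xa[127:64] * xb[127:64]
  //   vpclmullqhqdq(xtmp2, xa, xb);   // mid  = xa[63:0]   * xb[127:64]
  //   vpclmulhqlqdq(xtmp3, xa, xb);   // mid' = xa[127:64] * xb[63:0]
  //   // xor mid and mid', then shift the result left/right by 64 bits and
  //   // xor the halves into lo and hi to form the 256-bit product.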

  // Data

  void cmov32( Condition cc, Register dst, Address  src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address  dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address  dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

#ifdef _LP64
  // Generally the next two are only used for moving NULL
  // Although there are situations in initializing the mark word where
  // they could be used. They are dangerous.

  // They only exist on LP64 so that int32_t and intptr_t are not the same
  // and we have ambiguous declarations.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src)  { LP64_ONLY(popq(src))  NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);

  // sign extend an int ("l") to a pointer-sized element as needed
  void movl2ptr(Register dst, Address src)  { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }

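  // The ptr-sized wrappers above let the same code pattern serve both the
  // 32-bit and 64-bit ports. A minimal sketch of a conditional pointer select
  // (register choices are illustrative only):
  //
  //   testptr(rax, rax);                    // is the pointer in rax NULL?
  //   cmovptr(Assembler::zero, rax, rbx);   // if so, take the one in rbx
  //
  // On LP64 this expands to cmovq, on 32-bit to cmov32.
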
#ifdef COMPILER2
  // Generic instructions support for use in .ad files C2 code generation
  void vabsnegd(int opcode, XMMRegister dst, Register scr);
  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
  void vabsnegf(int opcode, XMMRegister dst, Register scr);
  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
  void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
  void vshiftd(int opcode, XMMRegister dst, XMMRegister src);
  void vshiftd(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister src);
  void vshiftw(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister src);
  void vshiftq(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
#endif
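
  // The 'opcode' argument of the vshift*/vabsneg* helpers above selects the
  // concrete variant (left vs. right shift, arithmetic vs. logical, single vs.
  // double precision); the .ad match rules pass the node's IR opcode through,
  // so one helper covers all of them. An illustrative (not verbatim) use from
  // a match rule:
  //
  //   __ vshiftd(opcode, dst_reg, shift_reg);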

  // C2 compiled method's prolog code.
  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);

  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
  void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large);

  // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
  void xmm_clear_mem(Register base, Register cnt, XMMRegister xtmp);

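  // Sketch of how the memory-clearing helpers above are typically driven from
  // generated code (registers and operands below are placeholders only):
  //
  //   movptr(rdi, obj_start);                 // base of the area to zero
  //   movptr(rcx, size_in_qwords);            // length in 8-byte words
  //   clear_mem(rdi, rcx, rax, xmm0, /*is_large*/ false);
  //
  // With 'is_large' set the helper skips the short inlined loop and goes
  // straight to the large-block path.
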
#ifdef COMPILER2
  void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  // IndexOf strings.
  // Small strings are loaded through stack if they cross page boundary.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2, Register result,
                      XMMRegister vec, Register tmp,
                      int ae);

  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through stack.
  void string_indexofC8(Register str1, Register str2,
                        Register cnt1, Register cnt2,
                        int int_cnt2, Register result,
                        XMMRegister vec, Register tmp,
                        int ae);

  // Smallest code: we don't need to load through stack,
  // check string tail.

  // helper function for string_compare
  void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
                          Address::ScaleFactor scale, Address::ScaleFactor scale1,
                          Address::ScaleFactor scale2, Register index, int ae);
  // Compare strings.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1, int ae);

  // Search for Non-ASCII character (Negative byte value) in a byte array,
  // return true if it has any and false otherwise.
  void has_negatives(Register ary1, Register len,
                     Register result, Register tmp1,
                     XMMRegister vec1, XMMRegister vec2);

  // Compare char[] or byte[] arrays.
  void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                     Register limit, Register result, Register chr,
                     XMMRegister vec1, XMMRegister vec2, bool is_char);

#endif
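
  // The 'ae' argument on the string helpers above encodes how each operand is
  // encoded (Latin-1 or UTF-16), so one code body serves the byte[]/char[]
  // combinations generated by C2. A call might look like the following sketch;
  // the registers are dictated by the matching .ad rule and are illustrative
  // here:
  //
  //   string_compare(rdi, rsi, rdx, rcx, rax, xmm0, ae);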

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

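  // These helpers back the BigInteger and array intrinsics (multiplyToLen,
  // squareToLen, mulAdd, vectorizedMismatch). The stub generator mostly just
  // marshals the Java arguments into the expected registers; a rough sketch
  // for multiply_to_len, with illustrative register choices:
  //
  //   multiply_to_len(rdi /*x*/, rsi /*xlen*/, rdx /*y*/, rcx /*ylen*/,
  //                   r8  /*z*/, r9  /*zlen*/,
  //                   r12, r13, r14, r15, rbx /*tmp1..tmp5*/);
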
  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on a naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
  void fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);

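  // The CRC32 kernel is built from the folding primitives above: the buffer is
  // consumed 128 bits at a time, each chunk folded into the running CRC with a
  // carry-less multiply by precomputed constants (xK), and the tail finished
  // byte-wise through 'table'. A sketch of one folding step, with hypothetical
  // registers and offset:
  //
  //   fold_128bit_crc32(xmm0 /*xcrc*/, xmm1 /*xK*/, xmm2 /*xtmp*/, rsi /*buf*/, 0);
  //
  // fold_8bit_crc32() then reduces the remaining bytes via the lookup table.
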
  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2);

#ifdef _LP64
  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);
#endif // _LP64
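
  // cache_wb()/cache_wbsync() support the memory-writeback intrinsics used for
  // flushing mapped persistent memory: generated code walks a region one cache
  // line at a time and brackets the loop with sync calls, roughly (registers
  // and bounds are illustrative):
  //
  //   cache_wbsync(true);                 // pre-sync ordering, if required
  //   // for each cache line in [base, base + size):
  //   cache_wb(Address(base, offset));
  //   cache_wbsync(false);                // post-sync: order the writebacks
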
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};

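// Example (sketch): emitting probe code that can be disabled by a runtime
// flag. The flag shown is illustrative; any 'const bool*' works.
//
//   {
//     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
//     // code emitted here is jumped over at run-time whenever
//     // DTraceMethodProbes == false
//   }
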
#endif // CPU_X86_MACROASSEMBLER_X86_HPP |