/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
#define CPU_X86_VM_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"
// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 protected:

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,          // the entry point
    int     number_of_arguments   // the number of arguments to pop after the call
  );

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(      // returns the register containing the thread upon return
    Register oop_result,          // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,         // the thread if computed before; use noreg otherwise
    Register last_java_sp,        // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,         // the entry point
    int      number_of_arguments, // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions     // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
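  // Illustrative sketch only, not part of this interface: how a caller might rely on the
  // implicit-check fast case versus forcing an explicit check. The '__' shorthand (the usual
  // '#define __ masm->' idiom), the register choice and the offsets are assumptions for
  // illustration.
  //
  //   __ null_check(rbx, 8);                // small known offset: nothing is emitted, the
  //                                         // access at [rbx + 8] faults if rbx is NULL
  //   if (MacroAssembler::needs_explicit_null_check(large_offset)) {
  //     __ null_check(rbx);                 // offset unknown or too large: emit a real test
  //   }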
  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
           op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  // special instructions for EVEX
  void setvectmask(Register dst, Register src);
  void restorevectmask();

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

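  // Illustrative sketch, not part of this interface: a typical upcall into the runtime
  // through call_VM, assuming the usual '#define __ _masm->' shorthand. The entry point
  // name and argument register below are hypothetical placeholders.
  //
  //   __ call_VM(rax,                                                 // oop result, or noreg
  //              CAST_FROM_FN_PTR(address, SomeRuntime::some_entry),  // hypothetical entry
  //              rbx);                                                // arg_1
  //   // pending exceptions have been checked on return (check_exceptions defaults to true)
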
  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // Stores
  void store_check(Register obj);                // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)

  void resolve_jobject(Register value, Register thread, Register tmp);
  void clear_jweak_tag(Register possibly_jweak);

#if INCLUDE_ALL_GCS

  void g1_write_barrier_pre(Register obj,
                            Register pre_val,
                            Register thread,
                            Register tmp,
                            bool tosca_live,
                            bool expand_call);

  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register thread,
                             Register tmp,
                             Register tmp2);

#endif // INCLUDE_ALL_GCS
  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result);
  void load_mirror(Register mirror, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);

  void load_heap_oop(Register dst, Address src);
  void load_heap_oop_not_null(Register dst, Address src);
  void store_heap_oop(Address dst, Register src);
  void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // Returns the byte size of the instructions generated by decode_klass_not_null()
  // when compressed klass pointers are being used.
  static int instr_size_for_decode_klass_not_null();

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

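  // Illustrative sketch (assumptions flagged): after fcmp() the materialized eflags are
  // typically consumed with ordinary conditional jumps, e.g. parity for the unordered case.
  // The '__' shorthand and the label names are placeholders, not part of this interface.
  //
  //   __ fcmp(rax);
  //   __ jcc(Assembler::parity, L_unordered);  // PF -> at least one operand was NaN
  //   __ jcc(Assembler::below,  L_less);       // CF -> x < y
  //   __ jcc(Assembler::equal,  L_equal);      // ZF -> x == y
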
  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Pop ST (ffree & fincstp combined)
  void fpop();

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  void push_fTOS();

  // pops double TOS element from CPU stack and pushes on FPU stack
  void pop_fTOS();

  void empty_FPU_stack();

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a power of two
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();
  // allocation
  void eden_allocate(
    Register obj,                 // result: pointer to object after successful allocation
    Register var_size_in_bytes,   // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,   // object size in bytes if known at compile time
    Register t1,                  // temp register
    Label&   slow_case            // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,                 // result: pointer to object after successful allocation
    Register var_size_in_bytes,   // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,   // object size in bytes if known at compile time
    Register t1,                  // temp register
    Register t2,                  // temp register
    Label&   slow_case            // continuation point if fast allocation fails
  );
  Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

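  // Illustrative sketch (assumptions noted): roughly how a caller, or the combined
  // check_klass_subtype() above, wires the two halves together. Register choices and
  // the '__' shorthand are placeholders.
  //
  //   Label L_success, L_failure;
  //   __ check_klass_subtype_fast_path(sub, super, tmp,        &L_success, &L_failure, NULL);
  //   // fall-through from the fast path means "maybe": resolve it on the slow path
  //   __ check_klass_subtype_slow_path(sub, super, tmp, noreg, &L_success, NULL);
  //   __ bind(L_failure);   // slow-path fall-through: not a subtype
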
  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  // TODO: Make these macros with file and line like sparc version!
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with positive offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp);

  void verify_tlab();

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
#ifdef COMPILER2
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 BiasedLockingCounters* counters,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
#if INCLUDE_RTM_OPT
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif
#endif

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

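  // Illustrative sketch (assumption-laden): these wrappers let a caller compare against a
  // VM-global through an AddressLiteral even when the address is not reachable as a 32-bit
  // displacement on 64-bit. The global and the label below are hypothetical placeholders.
  //
  //   __ cmp32(rax, ExternalAddress((address) &SomeCounters::limit));  // hypothetical global
  //   __ jcc(Assembler::greaterEqual, L_overflow);
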
  // Arithmetics

  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
  void cmpoop(Register dst, jobject obj);
#endif // _LP64

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }


  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

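  // Illustrative sketch, not part of this interface: bumping a VM statistics counter without
  // disturbing the flags a preceding compare just set (cond_inc32 preserves condition codes,
  // per the comment above). The counter symbol and label are hypothetical placeholders.
  //
  //   __ cmpptr(rax, rbx);
  //   __ cond_inc32(Assembler::equal, ExternalAddress((address) &SomeStats::hit_count));
  //   __ jcc(Assembler::equal, L_hit);   // flags from the cmpptr are still valid here
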
  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry).
  void jump(ArrayAddress entry);

  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src);
  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src);
#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,   /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,   /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,   /* ymm6 */
    XMMRegister xmm_3,   /* ymm7 */
    Register    reg_a,   /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
    Register    reg_b,   /* ebx */   /* full cycle is 8 iterations */
    Register    reg_c,   /* edi */
    Register    reg_d,   /* esi */
    Register    reg_e,   /* r8d */
    Register    reg_f,   /* r9d */
    Register    reg_g,   /* r10d */
    Register    reg_h,   /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);

 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#endif

#ifdef _LP64
 private:
  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
#endif

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif
  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifdef _LP64
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register r11);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
#else
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif
|
1056 void increase_precision(); |
|
1057 void restore_precision(); |
|
1058 |
|
1059 private: |
|
1060 |
|
1061 // these are private because users should be doing movflt/movdbl |
|
1062 |
|
1063 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } |
|
1064 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } |
|
1065 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } |
|
1066 void movss(XMMRegister dst, AddressLiteral src); |
|
1067 |
|
1068 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } |
|
1069 void movlpd(XMMRegister dst, AddressLiteral src); |
|
1070 |
|
1071 public: |
|
1072 |
|
1073 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } |
|
1074 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } |
|
1075 void addsd(XMMRegister dst, AddressLiteral src); |
|
1076 |
|
1077 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } |
|
1078 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } |
|
1079 void addss(XMMRegister dst, AddressLiteral src); |
|
1080 |
|
1081 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); } |
|
1082 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); } |
|
1083 void addpd(XMMRegister dst, AddressLiteral src); |
|
1084 |
|
1085 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } |
|
1086 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } |
|
1087 void divsd(XMMRegister dst, AddressLiteral src); |
|
1088 |
|
1089 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } |
|
1090 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } |
|
1091 void divss(XMMRegister dst, AddressLiteral src); |
|
1092 |
|
1093 // Move Unaligned Double Quadword |
|
1094 void movdqu(Address dst, XMMRegister src); |
|
1095 void movdqu(XMMRegister dst, Address src); |
|
1096 void movdqu(XMMRegister dst, XMMRegister src); |
|
1097 void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1); |
|
1098 // AVX Unaligned forms |
|
1099 void vmovdqu(Address dst, XMMRegister src); |
|
1100 void vmovdqu(XMMRegister dst, Address src); |
|
1101 void vmovdqu(XMMRegister dst, XMMRegister src); |
|
1102 void vmovdqu(XMMRegister dst, AddressLiteral src); |
|
1103 |
|
1104 // Move Aligned Double Quadword |
|
1105 void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } |
|
1106 void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } |
|
1107 void movdqa(XMMRegister dst, AddressLiteral src); |
|
1108 |
|
1109 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } |
|
1110 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } |
|
1111 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } |
|
1112 void movsd(XMMRegister dst, AddressLiteral src); |
|
1113 |
|
1114 void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); } |
|
1115 void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); } |
|
1116 void mulpd(XMMRegister dst, AddressLiteral src); |
|
1117 |
|
1118 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } |
|
1119 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } |
|
1120 void mulsd(XMMRegister dst, AddressLiteral src); |
|
1121 |
|
1122 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } |
|
1123 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } |
|
1124 void mulss(XMMRegister dst, AddressLiteral src); |
|
1125 |
|
1126 // Carry-Less Multiplication Quadword |
|
1127 void pclmulldq(XMMRegister dst, XMMRegister src) { |
|
1128 // 0x00 - multiply lower 64 bits [0:63] |
|
1129 Assembler::pclmulqdq(dst, src, 0x00); |
|
1130 } |
|
1131 void pclmulhdq(XMMRegister dst, XMMRegister src) { |
|
1132 // 0x11 - multiply upper 64 bits [64:127] |
|
1133 Assembler::pclmulqdq(dst, src, 0x11); |
|
1134 } |

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);
  // AVX 3-operand instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vpbroadcastw(XMMRegister dst, XMMRegister src);

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovmskb(Register dst, XMMRegister src);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshufd(XMMRegister dst, Address src, int mode);
  void pshufd(XMMRegister dst, XMMRegister src, int mode) { Assembler::pshufd(dst, src, mode); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
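
  // Illustrative sketch (not part of the original header): a caller XOR-ing two
  // 256-bit vectors would normally go through this wrapper and let it pick the
  // encoding, e.g. (assuming a MacroAssembler* named masm and that the
  // Assembler::AVX_256bit vector-length constant is in scope):
  //   masm->vpxor(xmm0, xmm1, xmm2, Assembler::AVX_256bit);
  // On AVX2 this emits vpxor; on plain AVX it falls back to vxorpd, which is
  // bitwise-equivalent for this purpose.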

  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
  void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
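  // Note (added comment, based on the assumption that the AvxVectorLen encoding
  // uses 1 for 256-bit operands): passing 'true' as the vector_len argument
  // relies on bool promoting to 1, i.e. the 256-bit form.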

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }
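
  // Illustrative sketch (not part of the original header): moving material in
  // and out of the upper lane of a YMM register pairs these wrappers, e.g.
  // with a MacroAssembler* named masm:
  //   masm->vextracti128(xmm1, xmm0, 1);       // xmm1 = upper 128 bits of ymm0
  //   masm->vinserti128(xmm0, xmm0, xmm2, 1);  // replace the upper lane with xmm2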

  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }

  // Data

  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }
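
  // Illustrative sketch (not part of the original header): movptr() lets shared
  // code move a pointer-sized value without spelling out movq vs. movl, e.g.
  // with a MacroAssembler* named masm:
  //   masm->movptr(rax, Address(rsp, 0));   // load a pointer-sized stack slot
  //   masm->movptr(Address(rsp, 0), rax);   // store it back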

#ifdef _LP64
  // Generally the next two are only used for moving NULL, although there are
  // situations in initializing the mark word where they could be used. They
  // are dangerous.

  // They exist only on LP64, where int32_t and intptr_t are distinct types,
  // so these declarations do not become ambiguous with the intptr_t overloads.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import the other mov() methods from the parent class, or else
  // they would be hidden by the following overloading declarations.
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);
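
  // Note (added comment): without the using-declarations above, the two
  // AddressLiteral overloads declared here would hide every inherited movdl()
  // and movq() overload; for example (illustrative),
  //   masm->movq(rax, Address(rsp, 0))
  // would then fail to compile through a MacroAssembler*.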

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);

  // sign extend, as needed, an l (32-bit) value to a ptr-sized element
  void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }

  // C2 compiled method's prolog code.
  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);

  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
  void clear_mem(Register base, Register cnt, Register rtmp, bool is_large);
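
  // Illustrative sketch (not part of the original header): a caller with the
  // base address, the qword count and a free scratch register (here rdi, rcx
  // and rax on x86_64) might zero a block like so, assuming a MacroAssembler*
  // named masm:
  //   masm->clear_mem(rdi, rcx, rax, /*is_large*/ false);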

#ifdef COMPILER2
  void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  // IndexOf strings.
  // Small strings are loaded through stack if they cross page boundary.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2, Register result,
                      XMMRegister vec, Register tmp,
                      int ae);

  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through stack.
  void string_indexofC8(Register str1, Register str2,
                        Register cnt1, Register cnt2,
                        int int_cnt2, Register result,
                        XMMRegister vec, Register tmp,
                        int ae);

    // Smallest code: we don't need to load through stack,
    // check string tail.

  // helper function for string_compare
  void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
                          Address::ScaleFactor scale, Address::ScaleFactor scale1,
                          Address::ScaleFactor scale2, Register index, int ae);
  // Compare strings.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1, int ae);

  // Search for Non-ASCII character (Negative byte value) in a byte array,
  // return true if it has any and false otherwise.
  void has_negatives(Register ary1, Register len,
                     Register result, Register tmp1,
                     XMMRegister vec1, XMMRegister vec2);

  // Compare char[] or byte[] arrays.
  void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                     Register limit, Register result, Register chr,
                     XMMRegister vec1, XMMRegister vec2, bool is_char);

#endif

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on a naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);

  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2);

};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};
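
// Illustrative sketch (not part of the original header): code that should run
// only when a run-time flag is set can be wrapped in a scope, assuming a
// MacroAssembler* named masm and a hypothetical bool flag SomeVMFlag:
//   {
//     SkipIfEqual skip(masm, &SomeVMFlag, false);
//     // ... code emitted here is jumped over at run-time whenever
//     // SomeVMFlag == false ...
//   }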

#endif // CPU_X86_VM_MACROASSEMBLER_X86_HPP