author       mbaesken
date         Wed, 06 Nov 2019 14:04:07 +0100
changeset    58959:b7b170ba3ba9
parent       55343:03d417fd7d9a
permissions  -rw-r--r--
/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_S390_MACROASSEMBLER_S390_HPP
#define CPU_S390_MACROASSEMBLER_S390_HPP

#include "asm/assembler.hpp"
#include "oops/accessDecorators.hpp"

#define MODERN_IFUN(name)  ((void (MacroAssembler::*)(Register, int64_t, Register, Register))&MacroAssembler::name)
#define CLASSIC_IFUN(name) ((void (MacroAssembler::*)(Register, int64_t, Register, Register))&MacroAssembler::name)
#define MODERN_FFUN(name)  ((void (MacroAssembler::*)(FloatRegister, int64_t, Register, Register))&MacroAssembler::name)
#define CLASSIC_FFUN(name) ((void (MacroAssembler::*)(FloatRegister, int64_t, Register, Register))&MacroAssembler::name)

class MacroAssembler: public Assembler {
 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  //
  // Optimized instruction emitters
  //

  // Move register if destination register and target register are different.
  void lr_if_needed(Register rd, Register rs);
  void lgr_if_needed(Register rd, Register rs);
  void llgfr_if_needed(Register rd, Register rs);
  void ldr_if_needed(FloatRegister rd, FloatRegister rs);

  void move_reg_if_needed(Register dest, BasicType dest_type, Register src, BasicType src_type);
  void move_freg_if_needed(FloatRegister dest, BasicType dest_type, FloatRegister src, BasicType src_type);

  void freg2mem_opt(FloatRegister reg,
                    int64_t disp,
                    Register index,
                    Register base,
                    void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                    void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                    Register scratch = Z_R0);
  void freg2mem_opt(FloatRegister reg,
                    const Address &a, bool is_double = true);

  void mem2freg_opt(FloatRegister reg,
                    int64_t disp,
                    Register index,
                    Register base,
                    void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                    void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                    Register scratch = Z_R0);
  void mem2freg_opt(FloatRegister reg,
                    const Address &a, bool is_double = true);

  void reg2mem_opt(Register reg,
                   int64_t disp,
                   Register index,
                   Register base,
                   void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                   void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                   Register scratch = Z_R0);
  // Returns the offset of the store instruction.
  int reg2mem_opt(Register reg, const Address &a, bool is_double = true);

  void mem2reg_opt(Register reg,
                   int64_t disp,
                   Register index,
                   Register base,
                   void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                   void (MacroAssembler::*classic)(Register, int64_t, Register, Register));
  void mem2reg_opt(Register reg, const Address &a, bool is_double = true);
  void mem2reg_signed_opt(Register reg, const Address &a);

  // AND immediate and set condition code. Works for 64 bit immediates/operations as well.
  void and_imm(Register r, long mask, Register tmp = Z_R0, bool wide = false);

  // 1's complement, 32bit or 64bit. Optimized to exploit distinct operands facility.
  // Note: The condition code is neither preserved nor correctly set by this code!!!
  // Note: (wide == false) does not protect the high order half of the target register
  //       from alteration. It only serves as an optimization hint for 32-bit results.
  void not_(Register r1, Register r2 = noreg, bool wide = false); // r1 = ~r2

  // Expanded support of all "rotate_then_<logicalOP>" instructions.
  //
  // Generalize and centralize rotate_then_<logicalOP> emitter.
  // Functional description. For details, see Principles of Operation, Chapter 7, "Rotate Then Insert..."
  //  - Bits in a register are numbered left (most significant) to right (least significant), i.e. [0..63].
  //  - Bytes in a register are numbered left (most significant) to right (least significant), i.e. [0..7].
  //  - Register src is rotated to the left by (nRotate&0x3f) positions.
  //  - Negative values for nRotate result in a rotation to the right by abs(nRotate) positions.
  //  - The bits in positions [lBitPos..rBitPos] of the _ROTATED_ src operand take part in the
  //    logical operation performed on the contents (in those positions) of the dst operand.
  //  - The logical operation that is performed on the dst operand is one of
  //     o insert the selected bits (replacing the original contents of those bit positions)
  //     o and the selected bits with the corresponding bits of the dst operand
  //     o or  the selected bits with the corresponding bits of the dst operand
  //     o xor the selected bits with the corresponding bits of the dst operand
  //  - For clear_dst == true, the destination register is cleared before the bits are inserted.
  //    For clear_dst == false, only the bit positions that get data inserted from src
  //    are changed. All other bit positions remain unchanged.
  //  - For test_only == true, the result of the logicalOP is only used to set the condition code, dst remains unchanged.
  //    For test_only == false, the result of the logicalOP replaces the selected bits of dst.
  //  - src32bit and dst32bit indicate the respective register is used as a 32bit value only.
  //    This knowledge can simplify code generation.
  //
  // Here is an important performance note, valid for all <logicalOP>s except "insert":
  //   Due to the too complex nature of the operation, it cannot be done in a single cycle.
  //   Timing constraints require the instructions to be cracked into two micro-ops, taking
  //   one or two cycles each to execute. In some cases, an additional pipeline bubble might get added.
  //   Macroscopically, that makes up for a three- or four-cycle instruction where you would
  //   expect just a single cycle.
  //   It is thus not beneficial from a performance point of view to exploit those instructions.
  //   Other reasons (code compactness, register pressure, ...) might outweigh this penalty.
  //
  unsigned long create_mask(int lBitPos, int rBitPos);
  void rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                        int nRotate, bool src32bit, bool dst32bit, bool oneBits);
  void rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                          bool clear_dst);
  void rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                       bool test_only);
  void rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                      bool test_only);
  void rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                       bool test_only);
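  // Illustrative example (not part of this interface; register choices are hypothetical):
  // to copy the low-order halfword (bits 48..63) of Z_R3 into Z_R2 with all other bits
  // of Z_R2 cleared, an emitter could issue
  //   rotate_then_insert(Z_R2, Z_R3, 48, 63, 0, true);  // nRotate = 0, clear_dst = true
  // and a pure test of those bit positions, leaving Z_R2 unchanged, could use
  //   rotate_then_and(Z_R2, Z_R3, 48, 63, 0, true);     // test_only = true, sets CC only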

  void add64(Register r1, RegisterOrConstant inc);

  // Helper function to multiply the 64bit contents of a register by a 16bit constant.
  // The optimization tries to avoid the mghi instruction, since it uses the FPU for
  // calculation and is thus rather slow.
  //
  // There is no handling for special cases, e.g. cval==0 or cval==1.
  //
  // Returns len of generated code block.
  unsigned int mul_reg64_const16(Register rval, Register work, int cval);
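  // Illustrative example (hypothetical register choices, not part of this interface):
  //   mul_reg64_const16(Z_R2, Z_R0, 1000);  // multiply the value in Z_R2 by 1000, Z_R0 as scratch.
  // The returned length could be used by emitters that need to know the exact code size.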

  // Generic operation r1 := r2 + imm.
  void add2reg(Register r1, int64_t imm, Register r2 = noreg);
  // Generic operation r := b + x + d.
  void add2reg_with_index(Register r, int64_t d, Register x, Register b = noreg);

  // Add2mem* methods for direct memory increment.
  void add2mem_32(const Address &a, int64_t imm, Register tmp);
  void add2mem_64(const Address &a, int64_t imm, Register tmp);

  // *((int8_t*)(dst)) |= imm8
  inline void or2mem_8(Address& dst, int64_t imm8);

  // Load values by size and signedness.
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes);

  // Load values with large offsets to base address.
 private:
  int split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate);
 public:
  void load_long_largeoffset(Register t, int64_t si20, Register a, Register tmp);
  void load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp);
  void load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp);

 private:
  long toc_distance();
 public:
  void load_toc(Register Rtoc);
  void load_long_pcrelative(Register Rdst, address dataLocation);
  static int load_long_pcrelative_size() { return 6; }
  void load_addr_pcrelative(Register Rdst, address dataLocation);
  static int load_addr_pcrel_size() { return 6; } // Just a LARL.

  // Load a value from memory and test (set CC).
  void load_and_test_byte    (Register dst, const Address &a);
  void load_and_test_short   (Register dst, const Address &a);
  void load_and_test_int     (Register dst, const Address &a);
  void load_and_test_int2long(Register dst, const Address &a);
  void load_and_test_long    (Register dst, const Address &a);

  // Test a bit in memory. Result is reflected in CC.
  void testbit(const Address &a, unsigned int bit);
  // Test a bit in a register. Result is reflected in CC.
  void testbit(Register r, unsigned int bitPos);

  void prefetch_read(Address a);
  void prefetch_update(Address a);

  // Clear a register, i.e. load const zero into reg. Return len (in bytes) of
  // generated instruction(s).
  //   whole_reg: Clear 64 bits if true, 32 bits otherwise.
  //   set_cc:    Use instruction that sets the condition code, if true.
  int clear_reg(Register r, bool whole_reg = true, bool set_cc = true);

#ifdef ASSERT
  int preset_reg(Register r, unsigned long pattern, int pattern_len);
#endif

  // Clear (store zeros) a small piece of memory.
  // CAUTION: Do not use this for atomic memory clearing. Use store_const() instead.
  //   addr: Address descriptor of memory to clear.
  //         Index register will not be used!
  //   size: Number of bytes to clear.
  void clear_mem(const Address& addr, unsigned size);

  // Move immediate values to memory. Currently supports 32 and 64 bit stores,
  // but may be extended to 16 bit store operation, if needed.
  // For details, see implementation in *.cpp file.
  int store_const(const Address &dest, long imm,
                  unsigned int lm, unsigned int lc,
                  Register scratch = Z_R0);
  inline int store_const(const Address &dest, long imm,
                         Register scratch = Z_R0, bool is_long = true);
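  // Illustrative example (hypothetical stack offsets, not part of this interface):
  // zeroing one 64-bit slot and a small local area could look like
  //   store_const(Address(Z_SP, 160), 0, Z_R1, true /* is_long */);  // single 8-byte store
  //   clear_mem(Address(Z_SP, 168), 32);                             // 32 bytes, not atomic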

  // Move/initialize arbitrarily large memory area. No check for destructive overlap.
  // Being interruptible, these instructions need a retry-loop.
  void move_long_ext(Register dst, Register src, unsigned int pad);

  void compare_long_ext(Register left, Register right, unsigned int pad);
  void compare_long_uni(Register left, Register right, unsigned int pad);

  void search_string(Register end, Register start);
  void search_string_uni(Register end, Register start);

  // Translate instructions
  // Being interruptible, these instructions need a retry-loop.
  void translate_oo(Register dst, Register src, uint mask);
  void translate_ot(Register dst, Register src, uint mask);
  void translate_to(Register dst, Register src, uint mask);
  void translate_tt(Register dst, Register src, uint mask);

  // Crypto instructions.
  // Being interruptible, these instructions need a retry-loop.
  void cksm(Register crcBuff, Register srcBuff);
  void km( Register dstBuff, Register srcBuff);
  void kmc(Register dstBuff, Register srcBuff);
  void kimd(Register srcBuff);
  void klmd(Register srcBuff);
  void kmac(Register srcBuff);

  // nop padding
  void align(int modulus);
  void align_address(int modulus);

  //
  // Constants, loading constants, TOC support
  //

  // Load generic address: d <- base(a) + index(a) + disp(a).
  inline void load_address(Register d, const Address &a);
  // Load absolute address (and try to optimize).
  void load_absolute_address(Register d, address addr);

  // Address of Z_ARG1 and argument_offset.
  // If temp_reg == arg_slot, arg_slot will be overwritten.
  Address argument_address(RegisterOrConstant arg_slot,
                           Register temp_reg = noreg,
                           int64_t extra_slot_offset = 0);

  // Load a narrow ptr constant (oop or klass ptr).
  void load_narrow_oop( Register t, narrowOop a);
  void load_narrow_klass(Register t, Klass* k);

  static bool is_load_const_32to64(address pos);
  static bool is_load_narrow_oop(address pos)   { return is_load_const_32to64(pos); }
  static bool is_load_narrow_klass(address pos) { return is_load_const_32to64(pos); }

  static int load_const_32to64_size()           { return 6; }
  static int load_narrow_oop_size()             { return load_const_32to64_size(); }
  static int load_narrow_klass_size()           { return load_const_32to64_size(); }

  static int patch_load_const_32to64(address pos, int64_t a);
  static int patch_load_narrow_oop(address pos, oop o);
  static int patch_load_narrow_klass(address pos, Klass* k);

  // cOops. CLFI exploit.
  void compare_immediate_narrow_oop(Register oop1, narrowOop oop2);
  void compare_immediate_narrow_klass(Register op1, Klass* op2);
  static bool is_compare_immediate32(address pos);
  static bool is_compare_immediate_narrow_oop(address pos);
  static bool is_compare_immediate_narrow_klass(address pos);
  static int compare_immediate_narrow_size()       { return 6; }
  static int compare_immediate_narrow_oop_size()   { return compare_immediate_narrow_size(); }
  static int compare_immediate_narrow_klass_size() { return compare_immediate_narrow_size(); }
  static int patch_compare_immediate_32(address pos, int64_t a);
  static int patch_compare_immediate_narrow_oop(address pos, oop o);
  static int patch_compare_immediate_narrow_klass(address pos, Klass* k);

  // Load a 32bit constant into a 64bit register.
  void load_const_32to64(Register t, int64_t x, bool sign_extend=true);
  // Load a 64 bit constant.
  void load_const(Register t, long a);
  inline void load_const(Register t, void* a);
  inline void load_const(Register t, Label& L);
  inline void load_const(Register t, const AddressLiteral& a);
  // Get the 64 bit constant from a `load_const' sequence.
  static long get_const(address load_const);
  // Patch the 64 bit constant of a `load_const' sequence. This is a low level
  // procedure. It neither flushes the instruction cache nor is it atomic.
  static void patch_const(address load_const, long x);
  static int load_const_size() { return 12; }
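  // Illustrative patching sequence (hypothetical values, not part of this interface):
  //   address insn = pc();
  //   load_const(Z_R1, (long)0);                     // emits a fixed 12-byte sequence (load_const_size()).
  //   ...
  //   MacroAssembler::patch_const(insn, new_value);  // later: rewrite the embedded 64 bit constant.
  // Note that patch_const() does not flush the icache; the caller is responsible for that.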

  // Turn a char into boolean. NOTE: destroys r.
  void c2bool(Register r, Register t = Z_R0);

  // Optimized version of load_const for constants that do not need to be
  // loaded by a sequence of instructions of fixed length and that do not
  // need to be patched.
  int load_const_optimized_rtn_len(Register t, long x, bool emit);
  inline void load_const_optimized(Register t, long x);
  inline void load_const_optimized(Register t, void* a);
  inline void load_const_optimized(Register t, Label& L);
  inline void load_const_optimized(Register t, const AddressLiteral& a);

 public:

  //----------------------------------------------------------
  //            oops in code             -------------
  //  including compressed oops support  -------------
  //----------------------------------------------------------

  // Metadata in code that we have to keep track of.
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index

  // allocate_index
  AddressLiteral allocate_oop_address(jobject obj);
  // find_index
  AddressLiteral constant_oop_address(jobject obj);
  // Uses allocate_oop_address.
  inline void set_oop(jobject obj, Register d);
  // Uses constant_oop_address.
  inline void set_oop_constant(jobject obj, Register d);
  // Uses constant_metadata_address.
  inline bool set_metadata_constant(Metadata* md, Register d);

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);
  //
  // branch, jump
  //

  // Use one generic function for all branch patches.
  static unsigned long patched_branch(address dest_pos, unsigned long inst, address inst_pos);

  void pd_patch_instruction(address branch, address target, const char* file, int line);

  // Extract relative address from "relative" instructions.
  static long get_pcrel_offset(unsigned long inst);
  static long get_pcrel_offset(address pc);
  static address get_target_addr_pcrel(address pc);

  static inline bool is_call_pcrelative_short(unsigned long inst);
  static inline bool is_call_pcrelative_long(unsigned long inst);
  static inline bool is_branch_pcrelative_short(unsigned long inst);
  static inline bool is_branch_pcrelative_long(unsigned long inst);
  static inline bool is_compareandbranch_pcrelative_short(unsigned long inst);
  static inline bool is_branchoncount_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex32_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex64_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex_pcrelative_short(unsigned long inst);
  static inline bool is_branch_pcrelative16(unsigned long inst);
  static inline bool is_branch_pcrelative32(unsigned long inst);
  static inline bool is_branch_pcrelative(unsigned long inst);
  static inline bool is_load_pcrelative_long(unsigned long inst);
  static inline bool is_misc_pcrelative_long(unsigned long inst);
  static inline bool is_pcrelative_short(unsigned long inst);
  static inline bool is_pcrelative_long(unsigned long inst);
  // PCrelative TOC access. Variants with address argument.
  static inline bool is_load_pcrelative_long(address iLoc);
  static inline bool is_pcrelative_short(address iLoc);
  static inline bool is_pcrelative_long(address iLoc);

  static inline bool is_pcrelative_instruction(address iloc);
  static inline bool is_load_addr_pcrel(address a);

  static void patch_target_addr_pcrel(address pc, address con);
  static void patch_addr_pcrel(address pc, address con) {
    patch_target_addr_pcrel(pc, con); // Just delegate. This is only for nativeInst_s390.cpp.
  }

  //---------------------------------------------------------
  //  Some macros for more comfortable assembler programming.
  //---------------------------------------------------------

  // NOTE: pass NearLabel T to signal that the branch target T will be bound to a near address.

  void compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);

  void branch_optimized(Assembler::branch_condition cond, address branch_target);
  void branch_optimized(Assembler::branch_condition cond, Label& branch_target);
  void compare_and_branch_optimized(Register r1,
                                    Register r2,
                                    Assembler::branch_condition cond,
                                    address branch_addr,
                                    bool len64,
                                    bool has_sign);
  void compare_and_branch_optimized(Register r1,
                                    jlong x2,
                                    Assembler::branch_condition cond,
                                    Label& branch_target,
                                    bool len64,
                                    bool has_sign);
  void compare_and_branch_optimized(Register r1,
                                    Register r2,
                                    Assembler::branch_condition cond,
                                    Label& branch_target,
                                    bool len64,
                                    bool has_sign);
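  // Illustrative example (hypothetical registers and label, not part of this interface):
  //   NearLabel done;
  //   compare64_and_branch(Z_R2, Z_R3, Assembler::bcondEqual, done);  // branch if Z_R2 == Z_R3
  //   ...
  //   bind(done);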

  //
  // Support for frame handling
  //
  // Specify the register that should be stored as the return pc in the
  // current frame (default is R14).
  inline void save_return_pc(Register pc = Z_R14);
  inline void restore_return_pc();

  // Get current PC.
  address get_PC(Register result);

  // Get current PC + offset. Offset given in bytes, must be even!
  address get_PC(Register result, int64_t offset);

  // Get size of instruction at pc (which must point to valid code).
  void instr_size(Register size, Register pc);

  // Accessing, and in particular modifying, a stack location is only safe if
  // the stack pointer (Z_SP) is set such that the accessed stack location is
  // in the reserved range.
  //
  // From a performance point of view, it is desirable not to change the SP
  // first and then immediately use it to access the freshly reserved space.
  // That opens a small gap, though. If, just after storing some value (the
  // frame pointer) into the to-be-reserved space, an interrupt is caught,
  // the handler might use the space beyond Z_SP for its own purpose.
  // If that happens, the stored value might get altered.

  // Resize current frame either relative to the current SP or to an absolute new SP.
  void resize_frame_sub(Register offset, Register fp, bool load_fp=true);
  void resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp);
  void resize_frame_absolute(Register addr, Register fp, bool load_fp);
  void resize_frame(RegisterOrConstant offset, Register fp, bool load_fp=true);

  // Push a frame of size `bytes'. If copy_sp is false, old_sp must already
  // contain a copy of Z_SP.
  void push_frame(Register bytes, Register old_sp, bool copy_sp = true, bool bytes_with_inverted_sign = false);

  // Push a frame of size `bytes'. No abi space provided.
  // Don't rely on register locking, instead pass a scratch register
  // (Z_R0 by default).
  // CAUTION! passing registers >= Z_R2 may produce bad results on
  // old CPUs!
  unsigned int push_frame(unsigned int bytes, Register scratch = Z_R0);

  // Push a frame of size `bytes' with abi160 on top.
  unsigned int push_frame_abi160(unsigned int bytes);

  // Pop current C frame.
  void pop_frame();
  // Pop current C frame and restore return PC register (Z_R14).
  void pop_frame_restore_retPC(int frame_size_in_bytes);
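  // Illustrative call-out sequence (a sketch only, not part of this interface):
  //   save_return_pc();        // keep Z_R14 in the caller frame
  //   push_frame_abi160(0);    // C frame with abi160 save area on top
  //   ...                      // e.g. a call_c(...) into the C runtime
  //   pop_frame();
  //   restore_return_pc();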

  //
  // Calls
  //

 private:
  address _last_calls_return_pc;

 public:
  // Support for VM calls. This is the base routine called by the
  // different versions of call_VM_leaf. The interpreter may customize
  // this version by overriding it for its purposes (e.g., to
  // save/restore additional registers when doing a VM call).
  void call_VM_leaf_base(address entry_point);
  void call_VM_leaf_base(address entry_point, bool allow_relocation);

  // It is imperative that all calls into the VM are handled via the
  // call_VM macros. They make sure that the stack linkage is setup
  // correctly. Call_VM's correspond to ENTRY/ENTRY_X entry points
  // while call_VM_leaf's correspond to LEAF entry points.
  //
  // This is the base routine called by the different versions of
  // call_VM. The interpreter may customize this version by overriding
  // it for its purposes (e.g., to save/restore additional registers
  // when doing a VM call).

  // If no last_java_sp is specified (noreg) then SP will be used instead.

  virtual void call_VM_base(
    Register oop_result,        // Where an oop-result ends up if any; use noreg otherwise.
    Register last_java_sp,      // To set up last_Java_frame in stubs; use noreg otherwise.
    address  entry_point,       // The entry point.
    bool     check_exception);  // Flag which indicates if exception should be checked.
  virtual void call_VM_base(
    Register oop_result,        // Where an oop-result ends up if any; use noreg otherwise.
    Register last_java_sp,      // To set up last_Java_frame in stubs; use noreg otherwise.
    address  entry_point,       // The entry point.
    bool     allow_relocation,  // Flag to request generation of relocatable code.
    bool     check_exception);  // Flag which indicates if exception should be checked.

  // Call into the VM.
  // Passes the thread pointer (in Z_ARG1) as a prepended argument.
  // Makes sure oop return values are visible to the GC.
  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
               Register arg_3, bool check_exceptions = true);
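  // Illustrative example (hypothetical entry point and register choices, not part of this interface):
  //   call_VM(Z_RET /* oop_result */, CAST_FROM_FN_PTR(address, InterpreterRuntime::some_entry), Z_ARG2 /* arg_1 */);
  // The thread pointer is prepended automatically (in Z_ARG1); pass noreg as oop_result
  // when no oop return value is expected.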

  void call_VM_static(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                      Register arg_3, bool check_exceptions = true);

  // Overloaded with last_java_sp.
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);

  // Really static VM leaf call (never patched).
  void call_VM_leaf_static(address entry_point);
  void call_VM_leaf_static(address entry_point, Register arg_1);
  void call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3);

  // Call a C function via its function entry. Updates and returns _last_calls_return_pc.
  inline address call(Register function_entry);
  inline address call_c(Register function_entry);
  address call_c(address function_entry);
  // Variant for really static (non-relocatable) calls which are never patched.
  address call_c_static(address function_entry);
  // TOC or pc-relative call + emits a runtime_call relocation.
  address call_c_opt(address function_entry);

  inline address call_stub(Register function_entry);
  inline address call_stub(address function_entry);

  // Get the pc where the last call will return to. Returns _last_calls_return_pc.
  inline address last_calls_return_pc();

 private:
  static bool is_call_far_patchable_variant0_at(address instruction_addr); // Dynamic TOC: load target addr from CP and call.
  static bool is_call_far_patchable_variant2_at(address instruction_addr); // PC-relative call, prefixed with NOPs.


 public:
  bool call_far_patchable(address target, int64_t toc_offset);
  static bool is_call_far_patchable_at(address inst_start);            // All supported forms of patchable calls.
  static bool is_call_far_patchable_pcrelative_at(address inst_start); // Pc-relative call with leading nops.
  static bool is_call_far_pcrelative(address instruction_addr);        // Pure far pc-relative call, with one leading size adjustment nop.
  static void set_dest_of_call_far_patchable_at(address inst_start, address target, int64_t toc_offset);
  static address get_dest_of_call_far_patchable_at(address inst_start, address toc_start);

  void align_call_far_patchable(address pc);

  // PCrelative TOC access.

  // This value is independent of code position - constant for the lifetime of the VM.
  static int call_far_patchable_size() {
    return load_const_from_toc_size() + call_byregister_size();
  }

  static int call_far_patchable_ret_addr_offset() { return call_far_patchable_size(); }

  static bool call_far_patchable_requires_alignment_nop(address pc) {
    int size = call_far_patchable_size();
    return ((intptr_t)(pc + size) & 0x03L) != 0;
  }

  // END OF PCrelative TOC access.

  static int jump_byregister_size()         { return 2; }
  static int jump_pcrelative_size()         { return 4; }
  static int jump_far_pcrelative_size()     { return 6; }
  static int call_byregister_size()         { return 2; }
  static int call_pcrelative_size()         { return 4; }
  static int call_far_pcrelative_size()     { return 2 + 6; } // Prepend each BRASL with a nop.
  static int call_far_pcrelative_size_raw() { return 6; }     // Raw BRASL size, without the leading nop.

  //
  // Java utilities
  //

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  // Polling page support.
  enum poll_mask {
    mask_stackbang = 0xde, // 222 (dec)
    mask_safepoint = 0x6f, // 111 (dec)
    mask_profiling = 0xba  // 186 (dec)
  };

  // Read from the polling page.
  void load_from_polling_page(Register polling_page_address, int64_t offset = 0);

  // Check if given instruction is a read from the polling page
  // as emitted by load_from_polling_page.
  static bool is_load_from_polling_page(address instr_loc);
  // Extract poll address from instruction and ucontext.
  static address get_poll_address(address instr_loc, void* ucontext);
  // Extract poll register from instruction.
  static uint get_poll_register(address instr_loc);

  // Check if a safepoint is requested; if so, branch to slow_path.
  void safepoint_poll(Label& slow_path, Register temp_reg);

  // Stack overflow checking
  void bang_stack_with_offset(int offset);

  // Check for reserved stack access in method being exited. If the reserved
  // stack area was accessed, protect it again and throw StackOverflowError.
  // Uses Z_R1.
  void reserved_stack_check(Register return_pc);

  // Atomics
  // -- none?

  void tlab_allocate(Register obj,                // Result: pointer to object after successful allocation.
                     Register var_size_in_bytes,  // Object size in bytes if unknown at compile time; invalid otherwise.
                     int      con_size_in_bytes,  // Object size in bytes if known at compile time.
                     Register t1,                 // Temp register.
                     Label&   slow_case);         // Continuation point if fast allocation fails.
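  // Illustrative example (hypothetical registers and size, not part of this interface):
  //   Label slow;
  //   tlab_allocate(Z_R2, noreg, 16 /* size known at compile time */, Z_R1, slow);
  //   ...            // Z_R2 now points to the newly allocated object.
  //   bind(slow);    // fall back to a runtime allocation path here.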

  // Emitter for interface method lookup.
  //   input:  recv_klass, intf_klass, itable_index
  //   output: method_result
  //   kills:  itable_index, temp1_reg, Z_R0, Z_R1
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp1_reg,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Factor out code to call ic_miss_handler.
  unsigned int call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch);
  void nmethod_UEP(Label& ic_miss);

  // Emitters for "partial subtype" checks.

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg and temp2_reg.
  // If super_check_offset is not -1, temp1_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register Rsubklass,
                                     Register Rsuperklass,
                                     Register Rarray_ptr, // tmp
                                     Register Rlength,    // tmp
                                     Label* L_success,
                                     Label* L_failure);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp1_reg,
                           Register temp2_reg,
                           Label& L_success);

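  // Illustrative example (hypothetical registers and label, not part of this interface):
  //   NearLabel is_subtype;
  //   check_klass_subtype(Z_R2 /* sub */, Z_R3 /* super */, Z_R4, Z_R5, is_subtype);
  //   ...              // falls through here on failure, e.g. throw or deoptimize.
  //   bind(is_subtype);
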
  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // Increment a counter at counter_address when the eq condition code is set.
  // Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
  void increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg);
  // Biased locking support
  // Upon entry, obj_reg must contain the target object, and mark_reg
  // must contain the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
                            Register temp2_reg, Label& done, Label* slow_case = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  // If allow_delay_slot_filling is set to true, the next instruction
  // emitted after this one will go in an annulled delay slot if the
  // biased locking exit case failed.
  void biased_locking_exit(Register mark_addr, Register temp_reg, Label& done);

  void compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);
  void compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);

  void resolve_jobject(Register value, Register tmp1, Register tmp2);

  // Support for last Java frame (but use call_VM instead where possible).
 private:
  void set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation);
  void reset_last_Java_frame(bool allow_relocation);
  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation);
 public:
  inline void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  inline void set_last_Java_frame_static(Register last_java_sp, Register last_Java_pc);
  inline void reset_last_Java_frame(void);
  inline void reset_last_Java_frame_static(void);
  inline void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
  inline void set_top_ijava_frame_at_SP_as_last_Java_frame_static(Register sp, Register tmp1);

  void set_thread_state(JavaThreadState new_state);

  // Read vm result from thread.
  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register result);

  // The vm result is currently getting hijacked for oop preservation.
  void set_vm_result(Register oop_result);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).
  //
  // %%%%%% Currently not done for z/Architecture

  void null_check(Register reg, Register tmp = Z_R0, int64_t offset = -1);
  static bool needs_explicit_null_check(intptr_t offset); // Implemented in shared file ?!
  static bool uses_implicit_null_check(void* address);

  // Klass oop manipulations if compressed.
  void encode_klass_not_null(Register dst, Register src = noreg);
  void decode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst);
  void load_klass(Register klass, Address mem);
  void load_klass(Register klass, Register src_oop);
  void load_prototype_header(Register Rheader, Register Rsrc_oop);
  void store_klass(Register klass, Register dst_oop, Register ck = noreg); // Klass will get compressed if ck not provided.
  void store_klass_gap(Register s, Register dst_oop);

  // This function calculates the size of the code generated by
  //   decode_klass_not_null(register dst)
  // when (Universe::heap() != NULL). Hence, if the instructions
  // it generates change, then this method needs to be updated.
  static int instr_size_for_decode_klass_not_null();

  void encode_heap_oop(Register oop);
  void encode_heap_oop_not_null(Register oop);

  static int get_oop_base_pow2_offset(uint64_t oop_base);
  int  get_oop_base(Register Rbase, uint64_t oop_base);
  int  get_oop_base_complement(Register Rbase, uint64_t oop_base);
  void compare_heap_oop(Register Rop1, Address mem, bool maybeNULL);
  void compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL);

  // Access heap oop, handle encoding and GC barriers.
 private:
  void access_store_at(BasicType type, DecoratorSet decorators,
                       const Address& addr, Register val,
                       Register tmp1, Register tmp2, Register tmp3);
  void access_load_at(BasicType type, DecoratorSet decorators,
                      const Address& addr, Register dst,
                      Register tmp1, Register tmp2, Label *is_null = NULL);

 public:
  // tmp1 and tmp2 are used with decorators ON_PHANTOM_OOP_REF or ON_WEAK_OOP_REF.
  void load_heap_oop(Register dest, const Address &a,
                     Register tmp1, Register tmp2,
                     DecoratorSet decorators = 0, Label *is_null = NULL);
  void store_heap_oop(Register Roop, const Address &a,
                      Register tmp1, Register tmp2, Register tmp3,
                      DecoratorSet decorators = 0);

  void oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
                   Register Rbase = Z_R1, int pow2_offset = -1, bool only32bitValid = false);
  void oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL,
                   Register Rbase = Z_R1, int pow2_offset = -1);

  void resolve_oop_handle(Register result);
  void load_mirror_from_const_method(Register mirror, Register const_method);
  void load_method_holder(Register holder, Register method);

  //--------------------------
  //---  Operations on arrays.
  //--------------------------
  unsigned int Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg);
  unsigned int Clear_Array_Const(long cnt, Register base);
  unsigned int Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg);
  unsigned int CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
                                             Register cnt_reg,
                                             Register tmp1_reg, Register tmp2_reg);

  //-------------------------------------------
  //   Special String Intrinsics Implementation.
  //-------------------------------------------
  // Intrinsics for CompactStrings
48094
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47216
diff
changeset
|
848 |
// Restores: src, dst |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47216
diff
changeset
|
849 |
// Uses: cnt |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47216
diff
changeset
|
850 |
// Kills: tmp, Z_R0, Z_R1. |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47216
diff
changeset
|
851 |
// Early clobber: result. |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47216
diff
changeset
|
852 |
// Boolean precise controls accuracy of result value. |
58959 | 853 |
#ifdef COMPILER2 |
48094
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47216
diff
changeset
|
854 |
unsigned int string_compress(Register result, Register src, Register dst, Register cnt, |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47216
diff
changeset
|
855 |
Register tmp, bool precise); |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47216
diff
changeset
|
856 |
|
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47216
diff
changeset
|
857 |
// Inflate byte[] to char[]. |
bca569f79fa1
8189793: [s390]: Improve String compress/inflate by exploiting vector instructions
lucy
parents:
47216
diff
changeset
|
858 |
unsigned int string_inflate_trot(Register src, Register dst, Register cnt, Register tmp); |
859 |
|
860 |
// Inflate byte[] to char[]. |
861 |
// Restores: src, dst |
862 |
// Uses: cnt |
863 |
// Kills: tmp, Z_R0, Z_R1. |
864 |
unsigned int string_inflate(Register src, Register dst, Register cnt, Register tmp); |
865 |
|
866 |
// Inflate byte[] to char[], length known at compile time. |
867 |
// Restores: src, dst |
868 |
// Kills: tmp, Z_R0, Z_R1. |
869 |
// Note: |
870 |
// len is signed int. Counts # characters, not bytes. |
871 |
unsigned int string_inflate_const(Register src, Register dst, Register tmp, int len); |
42065 | 872 |
|
873 |
// Kills src. |
|
874 |
unsigned int has_negatives(Register result, Register src, Register cnt, |
|
875 |
Register odd_reg, Register even_reg, Register tmp); |
|
876 |
||
877 |
unsigned int string_compare(Register str1, Register str2, Register cnt1, Register cnt2, |
|
878 |
Register odd_reg, Register even_reg, Register result, int ae); |
|
879 |
||
880 |
unsigned int array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit, |
|
881 |
Register odd_reg, Register even_reg, Register result, bool is_byte); |
|
882 |
||
883 |
unsigned int string_indexof(Register result, Register haystack, Register haycnt, |
|
884 |
Register needle, Register needlecnt, int needlecntval, |
|
885 |
Register odd_reg, Register even_reg, int ae); |
|
886 |
||
887 |
unsigned int string_indexof_char(Register result, Register haystack, Register haycnt, |
|
888 |
Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte); |
|
58959 | 889 |
#endif |
42065 | 890 |
|
891 |
// Emit an oop const to the constant pool and set relocation info |
|
892 |
// with address current_pc. Return the TOC offset of the constant. |
|
893 |
int store_const_in_toc(AddressLiteral& val); |
|
894 |
int store_oop_in_toc(AddressLiteral& oop); |
|
895 |
// Emit an oop const to the constant pool via store_oop_in_toc, or |
|
896 |
// emit a scalar const to the constant pool via store_const_in_toc, |
|
897 |
// and load the constant into register dst. |
|
898 |
bool load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc = noreg); |
|
899 |
// Get CPU version dependent size of load_const sequence. |
|
900 |
// The returned value is valid only for code sequences |
|
901 |
// generated by load_const, not load_const_optimized. |
|
902 |
static int load_const_from_toc_size() { |
|
903 |
return load_long_pcrelative_size(); |
|
904 |
} |
|
905 |
bool load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc = noreg); |
|
906 |
static intptr_t get_const_from_toc(address pc); |
|
907 |
static void set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb); |
|
908 |
||
909 |
// Dynamic TOC. |
|
910 |
static bool is_load_const(address a); |
|
911 |
static bool is_load_const_from_toc_pcrelative(address a); |
|
912 |
static bool is_load_const_from_toc(address a) { return is_load_const_from_toc_pcrelative(a); } |
|
913 |
||
914 |
// PCrelative TOC access. |
|
915 |
static bool is_call_byregister(address a) { return is_z_basr(*(short*)a); } |
|
916 |
static bool is_load_const_from_toc_call(address a); |
|
917 |
static bool is_load_const_call(address a); |
|
918 |
static int load_const_call_size() { return load_const_size() + call_byregister_size(); } |
|
919 |
static int load_const_from_toc_call_size() { return load_const_from_toc_size() + call_byregister_size(); } |
|
920 |
// Offset is +/- 2**32 -> use long. |
|
921 |
static long get_load_const_from_toc_offset(address a); |
|
922 |
||
923 |
// Bit operations for single register operands. |
|
924 |
inline void lshift(Register r, int places, bool doubl = true); // << |
|
925 |
inline void rshift(Register r, int places, bool doubl = true); // >> |
|
926 |
||
927 |
// |
|
928 |
// Debugging |
|
929 |
// |
|
930 |
||
931 |
// Assert on CC (condition code in CPU state). |
|
932 |
void asm_assert(bool check_equal, const char* msg, int id) PRODUCT_RETURN; |
|
933 |
void asm_assert_low(const char *msg, int id) PRODUCT_RETURN; |
|
934 |
void asm_assert_high(const char *msg, int id) PRODUCT_RETURN; |
|
935 |
void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); } |
|
936 |
void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); } |
|
937 |
||
938 |
void asm_assert_static(bool check_equal, const char* msg, int id) PRODUCT_RETURN; |
|
939 |
||
940 |
private: |
|
941 |
// Emit assertions. |
|
942 |
void asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset, |
|
943 |
Register mem_base, const char* msg, int id) PRODUCT_RETURN; |
|
944 |
||
945 |
public: |
|
946 |
inline void asm_assert_mem4_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) { |
|
947 |
asm_assert_mems_zero(true, true, 4, mem_offset, mem_base, msg, id); |
|
948 |
} |
|
949 |
inline void asm_assert_mem8_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) { |
|
950 |
asm_assert_mems_zero(true, true, 8, mem_offset, mem_base, msg, id); |
|
951 |
} |
|
952 |
inline void asm_assert_mem4_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) { |
|
953 |
asm_assert_mems_zero(false, true, 4, mem_offset, mem_base, msg, id); |
|
954 |
} |
|
955 |
inline void asm_assert_mem8_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) { |
|
956 |
asm_assert_mems_zero(false, true, 8, mem_offset, mem_base, msg, id); |
|
957 |
} |
|
958 |
||
959 |
inline void asm_assert_mem4_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) { |
|
960 |
asm_assert_mems_zero(true, false, 4, mem_offset, mem_base, msg, id); |
|
961 |
} |
|
962 |
inline void asm_assert_mem8_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) { |
|
963 |
asm_assert_mems_zero(true, false, 8, mem_offset, mem_base, msg, id); |
|
964 |
} |
|
965 |
inline void asm_assert_mem4_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) { |
|
966 |
asm_assert_mems_zero(false, false, 4, mem_offset, mem_base, msg, id); |
|
967 |
} |
|
968 |
inline void asm_assert_mem8_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) { |
|
969 |
asm_assert_mems_zero(false, false, 8, mem_offset, mem_base, msg, id); |
|
970 |
} |
|
971 |
void asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) PRODUCT_RETURN; |
|
972 |
||
973 |
// Verify Z_thread contents. |
|
974 |
void verify_thread(); |
|
975 |
||
976 |
// Only if +VerifyOops. |
|
977 |
void verify_oop(Register reg, const char* s = "broken oop"); |
|
978 |
||
979 |
// TODO: verify_method and klass metadata (compare against vptr?). |
|
980 |
void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} |
|
981 |
void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {} |
|
982 |
||
983 |
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__) |
|
984 |
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__) |
|
985 |
||
986 |
private: |
|
987 |
// Generate printout in stop(). |
|
988 |
static const char* stop_types[]; |
|
989 |
enum { |
|
990 |
stop_stop = 0, |
|
991 |
stop_untested = 1, |
|
992 |
stop_unimplemented = 2, |
|
993 |
stop_shouldnotreachhere = 3, |
|
994 |
stop_end = 4 |
|
995 |
}; |
|
996 |
// Prints msg and stops execution. |
|
997 |
void stop(int type, const char* msg, int id = 0); |
|
998 |
address stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation); // Non-relocatable code only!! |
|
999 |
void stop_static(int type, const char* msg, int id); // Non-relocatable code only!! |
|
1000 |
||
1001 |
public: |
|
1002 |
||
1003 |
// Prints msg and stops. |
|
1004 |
address stop_chain( address reentry, const char* msg = "", int id = 0) { return stop_chain(reentry, stop_stop, msg, id, true); } |
|
1005 |
address stop_chain_static(address reentry, const char* msg = "", int id = 0) { return stop_chain(reentry, stop_stop, msg, id, false); } |
|
1006 |
void stop_static (const char* msg = "", int id = 0) { stop_static(stop_stop, msg, id); } |
|
1007 |
void stop (const char* msg = "", int id = 0) { stop(stop_stop, msg, id); } |
|
1008 |
void untested (const char* msg = "", int id = 0) { stop(stop_untested, msg, id); } |
|
1009 |
void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); } |
|
1010 |
void should_not_reach_here(const char* msg = "", int id = -1) { stop(stop_shouldnotreachhere, msg, id); } |
|
1011 |
||
1012 |
// Factor out part of stop into subroutine to save space. |
|
1013 |
void stop_subroutine(); |
|
1014 |
||
1015 |
// Prints msg, but does not stop. |
|
1016 |
void warn(const char* msg); |
|
1017 |
||
1018 |
//----------------------------- |
|
1019 |
//--- basic block tracing code |
|
1020 |
//----------------------------- |
|
1021 |
void trace_basic_block(uint i); |
|
1022 |
void init_basic_block_trace(); |
|
1023 |
// Number of bytes by which the tracing code macro enlarges a basic block (worst case). |
|
1024 |
// Currently, worst case is 48 bytes. 64 puts us securely on the safe side. |
|
1025 |
static int basic_blck_trace_blk_size_incr() { return 64; } |
|
1026 |
||
1027 |
// Write pattern 0x0101010101010101 in region [low-before, high+after]. |
|
1028 |
// Low and high may be the same registers. Before and after are |
|
1029 |
// the numbers of 8-byte words. |
|
1030 |
void zap_from_to(Register low, Register high, Register tmp1 = Z_R0, Register tmp2 = Z_R1, |
|
1031 |
int before = 0, int after = 0) PRODUCT_RETURN; |
|
1032 |
||
1033 |
// Emitters for CRC32 calculation. |
|
46315
a796c32af782
8175368: [s390] Provide intrinsic implementation for CRC32C
lucy
parents:
43420
diff
changeset
|
1034 |
// A note on invertCRC: |
1035 |
// Unfortunately, internal representation of crc differs between CRC32 and CRC32C. |
1036 |
// CRC32 holds it's current crc value in the externally visible representation. |
1037 |
// CRC32C holds its current crc value in internal format, ready for updating. |
1038 |
// Thus, the crc value must be bit-flipped before updating it in the CRC32 case. |
1039 |
// In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()). |
1040 |
// The bool invertCRC parameter indicates whether bit-flipping is required before updates. |
42065 | 1041 |
private: |
1042 |
void fold_byte_crc32(Register crc, Register table, Register val, Register tmp); |
|
1043 |
void fold_8bit_crc32(Register crc, Register table, Register tmp); |
|
1044 |
void update_byte_crc32( Register crc, Register val, Register table); |
42065 | 1045 |
void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, |
1046 |
Register data); |
42065 | 1047 |
void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc, |
1048 |
Register t0, Register t1, Register t2, Register t3); |
|
1049 |
public: |
|
1050 |
void kernel_crc32_singleByteReg(Register crc, Register val, Register table, |
1051 |
bool invertCRC); |
1052 |
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp, |
1053 |
bool invertCRC); |
42065 | 1054 |
void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table, |
1055 |
Register t0, Register t1, Register t2, Register t3, |
1056 |
bool invertCRC); |
42065 | 1057 |
void kernel_crc32_1word(Register crc, Register buf, Register len, Register table, |
1058 |
Register t0, Register t1, Register t2, Register t3, |
1059 |
bool invertCRC); |
42065 | 1060 |
|
1061 |
// Emitters for BigInteger.multiplyToLen intrinsic |
|
1062 |
// Note: the length of the result array (zlen) is passed on the stack. |
|
1063 |
private: |
|
1064 |
void add2_with_carry(Register dest_hi, Register dest_lo, |
|
1065 |
Register src1, Register src2); |
|
1066 |
void multiply_64_x_64_loop(Register x, Register xstart, |
|
1067 |
Register x_xstart, |
|
1068 |
Register y, Register y_idx, Register z, |
|
1069 |
Register carry, Register product, |
|
1070 |
Register idx, Register kdx); |
|
1071 |
void multiply_add_128_x_128(Register x_xstart, Register y, Register z, |
|
1072 |
Register yz_idx, Register idx, |
|
1073 |
Register carry, Register product, int offset); |
|
1074 |
void multiply_128_x_128_loop(Register x_xstart, |
|
1075 |
Register y, Register z, |
|
1076 |
Register yz_idx, Register idx, |
|
1077 |
Register jdx, |
|
1078 |
Register carry, Register product, |
|
1079 |
Register carry2); |
|
1080 |
public: |
|
1081 |
void multiply_to_len(Register x, Register xlen, |
|
1082 |
Register y, Register ylen, |
|
1083 |
Register z, |
|
1084 |
Register tmp1, Register tmp2, |
|
1085 |
Register tmp3, Register tmp4, Register tmp5); |
|
1086 |
}; |
|
1087 |
||
1088 |
/** |
|
1089 |
* class SkipIfEqual: |
|
1090 |
* |
|
1091 |
* Instantiating this class will result in assembly code being output that will |
|
1092 |
* jump around any code emitted between the creation of the instance and its |
|
1093 |
* automatic destruction at the end of a scope block, depending on the value of |
|
1094 |
* the flag passed to the constructor, which will be checked at run-time. |
|
1095 |
*/ |
|
1096 |
class SkipIfEqual { |
|
1097 |
private: |
|
1098 |
MacroAssembler* _masm; |
|
1099 |
Label _label; |
|
1100 |
||
1101 |
public: |
|
1102 |
SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register _rscratch); |
|
1103 |
~SkipIfEqual(); |
|
1104 |
}; |
|
1105 |
||
1106 |
#ifdef ASSERT |
|
1107 |
// Return false (important, e.g., for our implementation of virtual calls). |
|
1108 |
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; } |
|
1109 |
#endif |
|
1110 |
||
1111 |
#endif // CPU_S390_MACROASSEMBLER_S390_HPP |