author      eosterlund
date        Wed, 21 Mar 2018 14:38:32 +0100
changeset   49484:ee8fa73b90f9
parent      49364:601146c66cad
child       49950:7b916885654d
permissions -rw-r--r--

/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_MACROASSEMBLER_ARM_HPP
#define CPU_ARM_VM_MACROASSEMBLER_ARM_HPP

#include "code/relocInfo.hpp"
#include "code/relocInfo_ext.hpp"

class BiasedLockingCounters;

// Introduced AddressLiteral and its subclasses to ease portability from
// x86 and avoid relocation issues
class AddressLiteral {
  RelocationHolder _rspec;
  // Typically, when we use AddressLiterals, we want their rval.
  // However, in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
  bool _is_lval;

  address _target;

 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Used for ExternalAddress or when the type is not specified.
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page,
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

  void set_rspec(relocInfo::relocType rtype);

 protected:
  // creation
  AddressLiteral()
    : _is_lval(false),
      _target(NULL)
  {}

 public:

  AddressLiteral(address target, relocInfo::relocType rtype) {
    _is_lval = false;
    _target = target;
    set_rspec(rtype);
  }

  AddressLiteral(address target, RelocationHolder const& rspec)
    : _rspec(rspec),
      _is_lval(false),
      _target(target)
  {}

  AddressLiteral(address target) {
    _is_lval = false;
    _target = target;
    set_rspec(reloc_for_target(target));
  }

  AddressLiteral addr() {
    AddressLiteral ret = *this;
    ret._is_lval = true;
    return ret;
  }

 private:

  address target() { return _target; }
  bool is_lval() { return _is_lval; }

  relocInfo::relocType reloc() const { return _rspec.type(); }
  const RelocationHolder& rspec() const { return _rspec; }

  friend class Assembler;
  friend class MacroAssembler;
  friend class Address;
  friend class LIR_Assembler;
  friend class InlinedAddress;
};

class ExternalAddress: public AddressLiteral {

 public:

  ExternalAddress(address target) : AddressLiteral(target) {}

};

class InternalAddress: public AddressLiteral {

 public:

  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}

};

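// Illustrative sketch, not part of the original header: typical use of the
// AddressLiteral family from macro-assembler code, assuming the usual
// "#define __ _masm->" convention; the counter and stub entry names below are
// hypothetical.
//
//   __ lea(Rtemp, ExternalAddress((address)&hypothetical_counter)); // materialize the address itself (lval)
//   __ jump(ExternalAddress(hypothetical_stub_entry));              // branch using the literal's target()/reloc()
//
// ExternalAddress picks an external_word relocation automatically when the
// target is relocatable; InternalAddress marks words inside the code itself.
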
// Inlined constants, for use with ldr_literal / bind_literal
// Note: InlinedInteger not supported (use move_slow(Register,int[,cond]))
class InlinedLiteral: StackObj {
 public:
  Label label; // need to be public for direct access with &
  InlinedLiteral() {
  }
};

class InlinedMetadata: public InlinedLiteral {
 private:
  Metadata *_data;

 public:
  InlinedMetadata(Metadata *data): InlinedLiteral() {
    _data = data;
  }
  Metadata *data() { return _data; }
};

// Currently unused
// class InlinedOop: public InlinedLiteral {
//  private:
//   jobject _jobject;
//
//  public:
//   InlinedOop(jobject target): InlinedLiteral() {
//     _jobject = target;
//   }
//   jobject jobject() { return _jobject; }
// };

class InlinedAddress: public InlinedLiteral {
 private:
  AddressLiteral _literal;

 public:

  InlinedAddress(jobject object): InlinedLiteral(), _literal((address)object, relocInfo::oop_type) {
    ShouldNotReachHere(); // use mov_oop (or implement InlinedOop)
  }

  InlinedAddress(Metadata *data): InlinedLiteral(), _literal((address)data, relocInfo::metadata_type) {
    ShouldNotReachHere(); // use InlinedMetadata or mov_metadata
  }

  InlinedAddress(address target, const RelocationHolder& rspec): InlinedLiteral(), _literal(target, rspec) {
    assert(rspec.type() != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rspec.type() != relocInfo::metadata_type, "Do not use InlinedAddress for metadata");
  }

  InlinedAddress(address target, relocInfo::relocType rtype): InlinedLiteral(), _literal(target, rtype) {
    assert(rtype != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rtype != relocInfo::metadata_type, "Do not use InlinedAddress for metadata");
  }

  // Note: default is relocInfo::none for InlinedAddress
  InlinedAddress(address target): InlinedLiteral(), _literal(target, relocInfo::none) {
  }

  address target() { return _literal.target(); }

  const RelocationHolder& rspec() const { return _literal.rspec(); }
};

class InlinedString: public InlinedLiteral {
 private:
  const char* _msg;

 public:
  InlinedString(const char* msg): InlinedLiteral() {
    _msg = msg;
  }
  const char* msg() { return _msg; }
};

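// Illustrative sketch, not part of the original header: the usual pattern for
// an inlined constant, assuming "__" expands to a MacroAssembler* and using a
// hypothetical runtime address. ldr_literal() emits the PC-relative load and
// bind_literal() emits the constant word itself, so the load must branch over it
// (this mirrors the mov_address() implementation later in this file).
//
//   InlinedAddress addr_literal(hypothetical_runtime_addr);
//   Label skip;
//   __ ldr_literal(Rtemp, addr_literal); // load the constant into Rtemp
//   __ b(skip);                          // jump over the inlined word
//   __ bind_literal(addr_literal);       // emit the constant in the code stream
//   __ bind(skip);
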
class MacroAssembler: public Assembler {
 protected:

  // Support for VM calls
  //

  // This is the base routine called by the different versions of call_VM_leaf.
  void call_VM_leaf_helper(address entry_point, int number_of_arguments);

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);

 public:

  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe() {}
  virtual void check_and_handle_earlyret() {}

  // By default, we do not need relocation information for non
  // patchable absolute addresses. However, when needed by some
  // extensions, ignore_non_patchable_relocations can be modified,
  // returning false to preserve all relocation information.
  inline bool ignore_non_patchable_relocations() { return true; }

  // Initially added to the Assembler interface as a pure virtual:
  //   RegisterConstant delayed_value(..)
  // for:
  //   6812678 macro assembler needs delayed binding of a few constants (for 6655638)
  // this was subsequently modified to its present name and return type
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);

#ifdef AARCH64
# define NOT_IMPLEMENTED() unimplemented("NYI at " __FILE__ ":" XSTR(__LINE__))
# define NOT_TESTED()      warn("Not tested at " __FILE__ ":" XSTR(__LINE__))
#endif

  void align(int modulus);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM methods.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  // The following methods are required by templateTable.cpp,
  // but not used on ARM.
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  // Note: The super_call_VM calls are not used on ARM

  // Raw call, without saving/restoring registers, exception handling, etc.
  // Mainly used from various stubs.
  // Note: if 'save_R9_if_scratched' is true, call_VM may on some
  // platforms save values on the stack. Set it to false (and handle
  // R9 in the callers) if the top of the stack must not be modified
  // by call_VM.
  void call_VM(address entry_point, bool save_R9_if_scratched);

  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  void get_vm_result(Register oop_result, Register tmp);
  void get_vm_result_2(Register metadata_result, Register tmp);

  // Always sets/resets sp, which default to SP if (last_sp == noreg)
  // Optionally sets/resets fp (use noreg to avoid setting it)
  // Always sets/resets pc on AArch64; optionally sets/resets pc on 32-bit ARM depending on save_last_java_pc flag
  // Note: when saving PC, set_last_Java_frame returns PC's offset in the code section
  // (for oop_maps offset computation)
  int set_last_Java_frame(Register last_sp, Register last_fp, bool save_last_java_pc, Register tmp);
  void reset_last_Java_frame(Register tmp);
  // status set in set_last_Java_frame for reset_last_Java_frame
  bool _fp_saved;
  bool _pc_saved;

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) __ stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) __ block_comment(error); __ stop(error)
#endif

  void lookup_virtual_method(Register recv_klass,
                             Register vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // No registers are killed, except temp_regs.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp_reg2,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path);

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // temp_reg3 can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes:
  // - condition codes will be Z on success, NZ on failure.
  // - temp_reg will be 0 on success, non-0 on failure
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp_reg2,
                                     Register temp_reg3, // auto assigned if noreg
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // temp_reg3 can be noreg, if no temps are available. It is used only on slow path.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Register temp_reg2,
                           Register temp_reg3, // auto assigned on slow path if noreg
                           Label& L_success);

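// Illustrative sketch, not part of the original header: a typical subtype
// check using the combined helper, with hypothetical register assignments.
// Control falls through on failure, so the failure path follows the call.
//
//   Label L_ok;
//   __ check_klass_subtype(Rsub_klass, Rsuper_klass, Rtemp, R0, noreg, L_ok);
//   // ... failure path (fall-through) ...
//   __ bind(L_ok);
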
  // Returns address of receiver parameter, using tmp as base register. tmp and params_count can be the same.
  Address receiver_argument_address(Register params_base, Register params_count, Register tmp);

  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop ", __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  void null_check(Register reg, Register tmp, int offset = -1);
  inline void null_check(Register reg) { null_check(reg, noreg, -1); } // for C1 lir_null_check

  // Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
  void eden_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                     RegisterOrConstant size_expression, Label& slow_case);
  void tlab_allocate(Register obj, Register obj_end, Register tmp1,
                     RegisterOrConstant size_expression, Label& slow_case);

  void zero_memory(Register start, Register end, Register tmp);

  void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register tmp);

  static bool needs_explicit_null_check(intptr_t offset);

  void arm_stack_overflow_check(int frame_size_in_bytes, Register tmp);
  void arm_stack_overflow_check(Register Rsize, Register tmp);

  void bang_stack_with_offset(int offset) {
    ShouldNotReachHere();
  }

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be supplied.
  // tmp_reg must be supplied.
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. If slow_case is NULL, then leaves condition
  // codes set (for C2's Fast_Lock node) and jumps to done label.
  // Falls through for the fast locking attempt.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  // Notes:
  // - swap_reg and tmp_reg are scratched
  // - Rtemp was (implicitly) scratched and can now be specified as the tmp2
  int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Register tmp2,
                           Label& done, Label& slow_case,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);

  // Building block for CAS cases of biased locking: makes CAS and records statistics.
  // Optional slow_case label is used to transfer control if CAS fails. Otherwise leaves condition codes set.
  void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
                                     Register tmp, Label& slow_case, int* counter_addr);

  void resolve_jobject(Register value, Register tmp1, Register tmp2);

#if INCLUDE_ALL_GCS
  // G1 pre-barrier.
  // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
  // If store_addr != noreg, then previous value is loaded from [store_addr];
  // in such case store_addr and new_val registers are preserved;
  // otherwise pre_val register is preserved.
  void g1_write_barrier_pre(Register store_addr,
                            Register new_val,
                            Register pre_val,
                            Register tmp1,
                            Register tmp2);

  // G1 post-barrier.
  // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register tmp1,
                             Register tmp2,
                             Register tmp3);
#endif // INCLUDE_ALL_GCS

#ifndef AARCH64
  void nop() {
    mov(R0, R0);
  }

  void push(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    str(rd, Address(SP, -wordSize, pre_indexed), cond);
  }

  void push(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    stmdb(SP, reg_set, writeback, cond);
  }

  void pop(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    ldr(rd, Address(SP, wordSize, post_indexed), cond);
  }

  void pop(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    ldmia(SP, reg_set, writeback, cond);
  }

  void fpushd(FloatRegister fd, AsmCondition cond = al) {
    fstmdbd(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpushs(FloatRegister fd, AsmCondition cond = al) {
    fstmdbs(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpopd(FloatRegister fd, AsmCondition cond = al) {
    fldmiad(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpops(FloatRegister fd, AsmCondition cond = al) {
    fldmias(SP, FloatRegisterSet(fd), writeback, cond);
  }
#endif // !AARCH64

  // Order access primitives
  enum Membar_mask_bits {
    StoreStore = 1 << 3,
    LoadStore  = 1 << 2,
    StoreLoad  = 1 << 1,
    LoadLoad   = 1 << 0
  };

#ifdef AARCH64
  // tmp register is not used on AArch64, this parameter is provided solely for better compatibility with 32-bit ARM
  void membar(Membar_mask_bits order_constraint, Register tmp = noreg);
#else
  void membar(Membar_mask_bits mask,
              Register tmp,
              bool preserve_flags = true,
              Register load_tgt = noreg);
#endif

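// Illustrative sketch, not part of the original header: the mask bits are
// combined with bitwise-or, e.g. ordering stores before a subsequent load
// (hypothetical call site, assuming Rtemp is free as the scratch register on
// 32-bit ARM):
//
//   __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad |
//                                              MacroAssembler::StoreStore),
//             Rtemp);
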
  void breakpoint(AsmCondition cond = al);
  void stop(const char* msg);
  // prints msg and continues
  void warn(const char* msg);
  void unimplemented(const char* what = "");
  void should_not_reach_here() { stop("should not reach here"); }
  static void debug(const char* msg, const intx* registers);

  // Create a walkable frame to help tracking down who called this code.
  // Returns the frame size in words.
  int should_not_call_this() {
    raw_push(FP, LR);
    should_not_reach_here();
    flush();
    return 2; // frame_size_in_words (FP+LR)
  }

  int save_all_registers();
  void restore_all_registers();
  int save_caller_save_registers();
  void restore_caller_save_registers();

  void add_rc(Register dst, Register arg1, RegisterOrConstant arg2);

  // add_slow and mov_slow are used to manipulate offsets larger than 1024;
  // these functions are not expected to handle all possible constants,
  // only those that can really occur during compilation
  void add_slow(Register rd, Register rn, int c);
  void sub_slow(Register rd, Register rn, int c);

#ifdef AARCH64
  static int mov_slow_helper(Register rd, intptr_t c, MacroAssembler* masm /* optional */);
#endif

  void mov_slow(Register rd, intptr_t c NOT_AARCH64_ARG(AsmCondition cond = al));
  void mov_slow(Register rd, const char *string);
  void mov_slow(Register rd, address addr);

  void patchable_mov_oop(Register rd, jobject o, int oop_index) {
    mov_oop(rd, o, oop_index AARCH64_ONLY_ARG(true));
  }
  void mov_oop(Register rd, jobject o, int index = 0
               AARCH64_ONLY_ARG(bool patchable = false)
               NOT_AARCH64_ARG(AsmCondition cond = al));


  void patchable_mov_metadata(Register rd, Metadata* o, int index) {
    mov_metadata(rd, o, index AARCH64_ONLY_ARG(true));
  }
  void mov_metadata(Register rd, Metadata* o, int index = 0 AARCH64_ONLY_ARG(bool patchable = false));

  void mov_float(FloatRegister fd, jfloat c NOT_AARCH64_ARG(AsmCondition cond = al));
  void mov_double(FloatRegister fd, jdouble c NOT_AARCH64_ARG(AsmCondition cond = al));

#ifdef AARCH64
  int mov_pc_to(Register rd) {
    Label L;
    adr(rd, L);
    bind(L);
    return offset();
  }
#endif

  // Note: this variant of mov_address assumes the address moves with
  // the code. Do *not* implement it with non-relocated instructions,
  // unless PC-relative.
#ifdef AARCH64
  void mov_relative_address(Register rd, address addr) {
    adr(rd, addr);
  }
#else
  void mov_relative_address(Register rd, address addr, AsmCondition cond = al) {
    int offset = addr - pc() - 8;
    assert((offset & 3) == 0, "bad alignment");
    if (offset >= 0) {
      assert(AsmOperand::is_rotated_imm(offset), "addr too far");
      add(rd, PC, offset, cond);
    } else {
      assert(AsmOperand::is_rotated_imm(-offset), "addr too far");
      sub(rd, PC, -offset, cond);
    }
  }
#endif // AARCH64

  // Runtime address that may vary from one execution to another. The
  // symbolic_reference describes what the address is, allowing
  // the address to be resolved in a different execution context.
  // Warning: do not implement as a PC relative address.
  void mov_address(Register rd, address addr, symbolic_Relocation::symbolic_reference t) {
    mov_address(rd, addr, RelocationHolder::none);
  }

  // rspec can be RelocationHolder::none (for ignored symbolic_Relocation).
  // In that case, the address is absolute and the generated code need
  // not be relocatable.
  void mov_address(Register rd, address addr, RelocationHolder const& rspec) {
    assert(rspec.type() != relocInfo::runtime_call_type, "do not use mov_address for runtime calls");
    assert(rspec.type() != relocInfo::static_call_type, "do not use mov_address for relocable calls");
    if (rspec.type() == relocInfo::none) {
      // absolute address, relocation not needed
      mov_slow(rd, (intptr_t)addr);
      return;
    }
#ifndef AARCH64
    if (VM_Version::supports_movw()) {
      relocate(rspec);
      int c = (int)addr;
      movw(rd, c & 0xffff);
      if ((unsigned int)c >> 16) {
        movt(rd, (unsigned int)c >> 16);
      }
      return;
    }
#endif
    Label skip_literal;
    InlinedAddress addr_literal(addr, rspec);
    ldr_literal(rd, addr_literal);
    b(skip_literal);
    bind_literal(addr_literal);
    // AARCH64 WARNING: because of alignment padding, extra padding
    // may be required to get a consistent size for C2, or rules must
    // overestimate size; see MachEpilogNode::size
    bind(skip_literal);
  }

  // Note: Do not define mov_address for a Label
  //
  // Loads from addresses potentially within the code are now handled
  // by InlinedLiteral subclasses (to allow more flexibility on how the
  // ldr_literal is performed).

  void ldr_literal(Register rd, InlinedAddress& L) {
    assert(L.rspec().type() != relocInfo::runtime_call_type, "avoid ldr_literal for calls");
    assert(L.rspec().type() != relocInfo::static_call_type, "avoid ldr_literal for calls");
    relocate(L.rspec());
#ifdef AARCH64
    ldr(rd, target(L.label));
#else
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
#endif
  }

  void ldr_literal(Register rd, InlinedString& L) {
    const char* msg = L.msg();
    if (code()->consts()->contains((address)msg)) {
      // string address moves with the code
#ifdef AARCH64
      ldr(rd, (address)msg);
#else
      ldr(rd, Address(PC, ((address)msg) - pc() - 8));
#endif
      return;
    }
    // Warning: use external strings with care. They are not relocated
    // if the code moves. If needed, use code_string to move them
    // to the consts section.
#ifdef AARCH64
    ldr(rd, target(L.label));
#else
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
#endif
  }

  void ldr_literal(Register rd, InlinedMetadata& L) {
    // relocation done in the bind_literal for metadata
#ifdef AARCH64
    ldr(rd, target(L.label));
#else
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
#endif
  }

  void bind_literal(InlinedAddress& L) {
    AARCH64_ONLY(align(wordSize));
    bind(L.label);
    assert(L.rspec().type() != relocInfo::metadata_type, "Must use InlinedMetadata");
    // We currently do not use oop 'bound' literals.
    // If the code evolves and the following assert is triggered,
    // we need to implement InlinedOop (see InlinedMetadata).
    assert(L.rspec().type() != relocInfo::oop_type, "Inlined oops not supported");
    // Note: relocation is handled by relocate calls in ldr_literal
    AbstractAssembler::emit_address((address)L.target());
  }

  void bind_literal(InlinedString& L) {
    const char* msg = L.msg();
    if (code()->consts()->contains((address)msg)) {
      // The Label should not be used; avoid binding it
      // to detect errors.
      return;
    }
    AARCH64_ONLY(align(wordSize));
    bind(L.label);
    AbstractAssembler::emit_address((address)L.msg());
  }

  void bind_literal(InlinedMetadata& L) {
    AARCH64_ONLY(align(wordSize));
    bind(L.label);
    relocate(metadata_Relocation::spec_for_immediate());
    AbstractAssembler::emit_address((address)L.data());
  }

  void resolve_oop_handle(Register result);
  void load_mirror(Register mirror, Register method, Register tmp);

  // Porting layer between 32-bit ARM and AArch64

#define COMMON_INSTR_1(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg_type) \
  void common_mnemonic(arg_type arg) { \
    AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg); \
  }

#define COMMON_INSTR_2(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg1_type, arg2_type) \
  void common_mnemonic(arg1_type arg1, arg2_type arg2) { \
    AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg1, arg2); \
  }

#define COMMON_INSTR_3(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg1_type, arg2_type, arg3_type) \
  void common_mnemonic(arg1_type arg1, arg2_type arg2, arg3_type arg3) { \
    AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg1, arg2, arg3); \
  }

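// Illustrative sketch, not part of the original header: for example,
// COMMON_INSTR_2(ldr_u32, ldr_w, ldr, Register, Address) expands to
//
//   void ldr_u32(Register arg1, Address arg2) {
//     AARCH64_ONLY(ldr_w) NOT_AARCH64(ldr) (arg1, arg2);
//   }
//
// so shared code can call ldr_u32() and get ldr_w on AArch64 or ldr on 32-bit ARM.
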
  COMMON_INSTR_1(jump, br,  bx,  Register)
  COMMON_INSTR_1(call, blr, blx, Register)

  COMMON_INSTR_2(cbz_32,  cbz_w,  cbz,  Register, Label&)
  COMMON_INSTR_2(cbnz_32, cbnz_w, cbnz, Register, Label&)

  COMMON_INSTR_2(ldr_u32, ldr_w,  ldr,  Register, Address)
  COMMON_INSTR_2(ldr_s32, ldrsw,  ldr,  Register, Address)
  COMMON_INSTR_2(str_32,  str_w,  str,  Register, Address)

  COMMON_INSTR_2(mvn_32,  mvn_w,  mvn,  Register, Register)
  COMMON_INSTR_2(cmp_32,  cmp_w,  cmp,  Register, Register)
  COMMON_INSTR_2(neg_32,  neg_w,  neg,  Register, Register)
  COMMON_INSTR_2(clz_32,  clz_w,  clz,  Register, Register)
  COMMON_INSTR_2(rbit_32, rbit_w, rbit, Register, Register)

  COMMON_INSTR_2(cmp_32, cmp_w, cmp, Register, int)
  COMMON_INSTR_2(cmn_32, cmn_w, cmn, Register, int)

  COMMON_INSTR_3(add_32,  add_w,  add,  Register, Register, Register)
  COMMON_INSTR_3(sub_32,  sub_w,  sub,  Register, Register, Register)
  COMMON_INSTR_3(subs_32, subs_w, subs, Register, Register, Register)
  COMMON_INSTR_3(mul_32,  mul_w,  mul,  Register, Register, Register)
  COMMON_INSTR_3(and_32,  andr_w, andr, Register, Register, Register)
  COMMON_INSTR_3(orr_32,  orr_w,  orr,  Register, Register, Register)
  COMMON_INSTR_3(eor_32,  eor_w,  eor,  Register, Register, Register)

  COMMON_INSTR_3(add_32, add_w,  add,  Register, Register, AsmOperand)
  COMMON_INSTR_3(sub_32, sub_w,  sub,  Register, Register, AsmOperand)
  COMMON_INSTR_3(orr_32, orr_w,  orr,  Register, Register, AsmOperand)
  COMMON_INSTR_3(eor_32, eor_w,  eor,  Register, Register, AsmOperand)
  COMMON_INSTR_3(and_32, andr_w, andr, Register, Register, AsmOperand)


  COMMON_INSTR_3(add_32,  add_w,  add,  Register, Register, int)
  COMMON_INSTR_3(adds_32, adds_w, adds, Register, Register, int)
  COMMON_INSTR_3(sub_32,  sub_w,  sub,  Register, Register, int)
  COMMON_INSTR_3(subs_32, subs_w, subs, Register, Register, int)

  COMMON_INSTR_2(tst_32, tst_w, tst, Register, unsigned int)
  COMMON_INSTR_2(tst_32, tst_w, tst, Register, AsmOperand)

  COMMON_INSTR_3(and_32, andr_w, andr, Register, Register, uint)
  COMMON_INSTR_3(orr_32, orr_w,  orr,  Register, Register, uint)
  COMMON_INSTR_3(eor_32, eor_w,  eor,  Register, Register, uint)

  COMMON_INSTR_1(cmp_zero_float,  fcmp0_s, fcmpzs, FloatRegister)
  COMMON_INSTR_1(cmp_zero_double, fcmp0_d, fcmpzd, FloatRegister)

  COMMON_INSTR_2(ldr_float,  ldr_s,   flds,   FloatRegister, Address)
  COMMON_INSTR_2(str_float,  str_s,   fsts,   FloatRegister, Address)
  COMMON_INSTR_2(mov_float,  fmov_s,  fcpys,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(neg_float,  fneg_s,  fnegs,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(abs_float,  fabs_s,  fabss,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(sqrt_float, fsqrt_s, fsqrts, FloatRegister, FloatRegister)
  COMMON_INSTR_2(cmp_float,  fcmp_s,  fcmps,  FloatRegister, FloatRegister)

  COMMON_INSTR_3(add_float, fadd_s, fadds, FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(sub_float, fsub_s, fsubs, FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(mul_float, fmul_s, fmuls, FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(div_float, fdiv_s, fdivs, FloatRegister, FloatRegister, FloatRegister)

  COMMON_INSTR_2(ldr_double,  ldr_d,   fldd,   FloatRegister, Address)
  COMMON_INSTR_2(str_double,  str_d,   fstd,   FloatRegister, Address)
  COMMON_INSTR_2(mov_double,  fmov_d,  fcpyd,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(neg_double,  fneg_d,  fnegd,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(cmp_double,  fcmp_d,  fcmpd,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(abs_double,  fabs_d,  fabsd,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(sqrt_double, fsqrt_d, fsqrtd, FloatRegister, FloatRegister)

  COMMON_INSTR_3(add_double, fadd_d, faddd, FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(sub_double, fsub_d, fsubd, FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(mul_double, fmul_d, fmuld, FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(div_double, fdiv_d, fdivd, FloatRegister, FloatRegister, FloatRegister)

  COMMON_INSTR_2(convert_f2d, fcvt_ds, fcvtds, FloatRegister, FloatRegister)
  COMMON_INSTR_2(convert_d2f, fcvt_sd, fcvtsd, FloatRegister, FloatRegister)

  COMMON_INSTR_2(mov_fpr2gpr_float, fmov_ws, fmrs, Register, FloatRegister)

#undef COMMON_INSTR_1
#undef COMMON_INSTR_2
#undef COMMON_INSTR_3

#ifdef AARCH64

  void mov(Register dst, Register src, AsmCondition cond) {
    if (cond == al) {
      mov(dst, src);
    } else {
      csel(dst, src, dst, cond);
    }
  }

  // Propagate other overloaded "mov" methods from Assembler.
  void mov(Register dst, Register src) { Assembler::mov(dst, src); }
  void mov(Register rd, int imm)       { Assembler::mov(rd, imm); }

  void mov(Register dst, int imm, AsmCondition cond) {
    assert(imm == 0 || imm == 1, "");
    if (imm == 0) {
      mov(dst, ZR, cond);
    } else if (imm == 1) {
      csinc(dst, dst, ZR, inverse(cond));
    } else if (imm == -1) {
      csinv(dst, dst, ZR, inverse(cond));
    } else {
      fatal("illegal mov(R%d,%d,cond)", dst->encoding(), imm);
    }
  }

  void movs(Register dst, Register src) { adds(dst, src, 0); }

#else // AARCH64

  void tbz(Register rt, int bit, Label& L) {
    assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
    tst(rt, 1 << bit);
    b(L, eq);
  }

  void tbnz(Register rt, int bit, Label& L) {
    assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
    tst(rt, 1 << bit);
    b(L, ne);
  }

  void cbz(Register rt, Label& L) {
    cmp(rt, 0);
    b(L, eq);
  }

  void cbz(Register rt, address target) {
    cmp(rt, 0);
    b(target, eq);
  }

  void cbnz(Register rt, Label& L) {
    cmp(rt, 0);
    b(L, ne);
  }

  void ret(Register dst = LR) {
    bx(dst);
  }

#endif // AARCH64

  Register zero_register(Register tmp) {
#ifdef AARCH64
    return ZR;
#else
    mov(tmp, 0);
    return tmp;
#endif
  }

  void logical_shift_left(Register dst, Register src, int shift) {
#ifdef AARCH64
    _lsl(dst, src, shift);
#else
    mov(dst, AsmOperand(src, lsl, shift));
#endif
  }

  void logical_shift_left_32(Register dst, Register src, int shift) {
#ifdef AARCH64
    _lsl_w(dst, src, shift);
#else
    mov(dst, AsmOperand(src, lsl, shift));
#endif
  }

  void logical_shift_right(Register dst, Register src, int shift) {
#ifdef AARCH64
    _lsr(dst, src, shift);
#else
    mov(dst, AsmOperand(src, lsr, shift));
#endif
  }

  void arith_shift_right(Register dst, Register src, int shift) {
#ifdef AARCH64
    _asr(dst, src, shift);
#else
    mov(dst, AsmOperand(src, asr, shift));
#endif
  }

  void asr_32(Register dst, Register src, int shift) {
#ifdef AARCH64
    _asr_w(dst, src, shift);
#else
    mov(dst, AsmOperand(src, asr, shift));
#endif
  }

  // If <cond> holds, compares r1 and r2. Otherwise, flags are set so that <cond> does not hold.
  void cond_cmp(Register r1, Register r2, AsmCondition cond) {
#ifdef AARCH64
    ccmp(r1, r2, flags_for_condition(inverse(cond)), cond);
#else
    cmp(r1, r2, cond);
#endif
  }

  // If <cond> holds, compares r and imm. Otherwise, flags are set so that <cond> does not hold.
  void cond_cmp(Register r, int imm, AsmCondition cond) {
#ifdef AARCH64
    ccmp(r, imm, flags_for_condition(inverse(cond)), cond);
#else
    cmp(r, imm, cond);
#endif
  }

  void align_reg(Register dst, Register src, int align) {
    assert (is_power_of_2(align), "should be");
#ifdef AARCH64
    andr(dst, src, ~(uintx)(align-1));
#else
    bic(dst, src, align-1);
#endif
  }

  void prefetch_read(Address addr) {
#ifdef AARCH64
    prfm(pldl1keep, addr);
#else
    pld(addr);
#endif
  }

  void raw_push(Register r1, Register r2) {
#ifdef AARCH64
    stp(r1, r2, Address(SP, -2*wordSize, pre_indexed));
#else
    assert(r1->encoding() < r2->encoding(), "should be ordered");
    push(RegisterSet(r1) | RegisterSet(r2));
#endif
  }

  void raw_pop(Register r1, Register r2) {
#ifdef AARCH64
    ldp(r1, r2, Address(SP, 2*wordSize, post_indexed));
#else
    assert(r1->encoding() < r2->encoding(), "should be ordered");
    pop(RegisterSet(r1) | RegisterSet(r2));
#endif
  }

  void raw_push(Register r1, Register r2, Register r3) {
#ifdef AARCH64
    raw_push(r1, r2);
    raw_push(r3, ZR);
#else
    assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
    push(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
#endif
  }

  void raw_pop(Register r1, Register r2, Register r3) {
#ifdef AARCH64
    raw_pop(r3, ZR);
    raw_pop(r1, r2);
#else
    assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
    pop(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
#endif
  }

  // Restores registers r1 and r2 previously saved by raw_push(r1, r2, ret_addr) and returns by ret_addr. Clobbers LR.
  void raw_pop_and_ret(Register r1, Register r2) {
#ifdef AARCH64
    raw_pop(r1, r2, LR);
    ret();
#else
    raw_pop(r1, r2, PC);
#endif
  }

  void indirect_jump(Address addr, Register scratch) {
#ifdef AARCH64
    ldr(scratch, addr);
    br(scratch);
#else
    ldr(PC, addr);
#endif
  }

  void indirect_jump(InlinedAddress& literal, Register scratch) {
#ifdef AARCH64
    ldr_literal(scratch, literal);
    br(scratch);
#else
    ldr_literal(PC, literal);
#endif
  }

#ifndef AARCH64
  void neg(Register dst, Register src) {
    rsb(dst, src, 0);
  }
#endif

  void branch_if_negative_32(Register r, Label& L) {
    // Note about branch_if_negative_32() / branch_if_any_negative_32() implementation for AArch64:
    // tbnz is not used instead of tst & b.mi because destination may be out of tbnz range (+-32KB)
    // since these methods are used in LIR_Assembler::emit_arraycopy() to jump to stub entry.
    tst_32(r, r);
    b(L, mi);
  }

  void branch_if_any_negative_32(Register r1, Register r2, Register tmp, Label& L) {
#ifdef AARCH64
    orr_32(tmp, r1, r2);
    tst_32(tmp, tmp);
#else
    orrs(tmp, r1, r2);
#endif
    b(L, mi);
  }

  void branch_if_any_negative_32(Register r1, Register r2, Register r3, Register tmp, Label& L) {
    orr_32(tmp, r1, r2);
#ifdef AARCH64
    orr_32(tmp, tmp, r3);
    tst_32(tmp, tmp);
#else
    orrs(tmp, tmp, r3);
#endif
    b(L, mi);
  }

  void add_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
#ifdef AARCH64
    add(dst, r1, r2, ex_sxtw, shift);
#else
    add(dst, r1, AsmOperand(r2, lsl, shift));
#endif
  }

  void sub_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
#ifdef AARCH64
    sub(dst, r1, r2, ex_sxtw, shift);
#else
    sub(dst, r1, AsmOperand(r2, lsl, shift));
#endif
  }


  // klass oop manipulations if compressed

#ifdef AARCH64
  void load_klass(Register dst_klass, Register src_oop);
#else
  void load_klass(Register dst_klass, Register src_oop, AsmCondition cond = al);
#endif // AARCH64

  void store_klass(Register src_klass, Register dst_oop);

#ifdef AARCH64
  void store_klass_gap(Register dst);
#endif // AARCH64

  // oop manipulations

  void load_heap_oop(Register dst, Address src);
  void store_heap_oop(Register src, Address dst);
  void store_heap_oop(Address dst, Register src) {
    store_heap_oop(src, dst);
  }
  void store_heap_oop_null(Register src, Address dst);

#ifdef AARCH64
  void encode_heap_oop(Register dst, Register src);
  void encode_heap_oop(Register r) {
    encode_heap_oop(r, r);
  }
  void decode_heap_oop(Register dst, Register src);
  void decode_heap_oop(Register r) {
    decode_heap_oop(r, r);
  }

#ifdef COMPILER2
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_oop(Register dst, jobject obj);
#endif

  void encode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register r);
  void decode_klass_not_null(Register dst, Register src);

  void reinit_heapbase();

#ifdef ASSERT
  void verify_heapbase(const char* msg);
#endif // ASSERT

  static int instr_count_for_mov_slow(intptr_t c);
  static int instr_count_for_mov_slow(address addr);
  static int instr_count_for_decode_klass_not_null();
#endif // AARCH64

  void ldr_global_ptr(Register reg, address address_of_global);
  void ldr_global_s32(Register reg, address address_of_global);
  void ldrb_global(Register reg, address address_of_global);

  // address_placeholder_instruction is an invalid instruction, used
  // as a placeholder in code for the address of a label
  enum { address_placeholder_instruction = 0xFFFFFFFF };

  void emit_address(Label& L) {
    assert(!L.is_bound(), "otherwise address will not be patched");
    target(L); // creates relocation which will be patched later

    assert ((offset() & (wordSize-1)) == 0, "should be aligned by word size");

#ifdef AARCH64
    emit_int32(address_placeholder_instruction);
    emit_int32(address_placeholder_instruction);
#else
    AbstractAssembler::emit_address((address)address_placeholder_instruction);
#endif
  }

  void b(address target, AsmCondition cond = al) {
    Assembler::b(target, cond);
  }
  void b(Label& L, AsmCondition cond = al) {
    // internal jumps
    Assembler::b(target(L), cond);
  }

  void bl(address target NOT_AARCH64_ARG(AsmCondition cond = al)) {
    Assembler::bl(target NOT_AARCH64_ARG(cond));
  }
  void bl(Label& L NOT_AARCH64_ARG(AsmCondition cond = al)) {
    // internal calls
    Assembler::bl(target(L) NOT_AARCH64_ARG(cond));
  }

#ifndef AARCH64
  void adr(Register dest, Label& L, AsmCondition cond = al) {
    int delta = target(L) - pc() - 8;
    if (delta >= 0) {
      add(dest, PC, delta, cond);
    } else {
      sub(dest, PC, -delta, cond);
    }
  }
#endif // !AARCH64

  // Variable-length jumps and calls. We now distinguish only the
  // patchable case from the other cases. Patchable must be
  // distinguished from relocatable. Relocatable means the generated code
  // containing the jump/call may move. Patchable means that the
  // targeted address may be changed later.

  // Non-patchable versions.
  // - used only for relocInfo::runtime_call_type and relocInfo::none
  // - may use relative or absolute format (do not use relocInfo::none
  //   if the generated code may move)
  // - the implementation takes into account switch to THUMB mode if the
  //   destination is a THUMB address
  // - the implementation supports far targets
  //
  // To reduce regression risk, scratch still defaults to noreg on
  // arm32. This results in patchable instructions. However, if
  // patching really matters, the call sites should be modified and
  // use patchable_call or patchable_jump. If patching is not required
  // and if a register can be clobbered, it should be explicitly
  // specified to allow future optimizations.
  void jump(address target,
            relocInfo::relocType rtype = relocInfo::runtime_call_type,
            Register scratch = AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg)
#ifndef AARCH64
            , AsmCondition cond = al
#endif
            );

  void call(address target,
            RelocationHolder rspec
            NOT_AARCH64_ARG(AsmCondition cond = al));

  void call(address target,
            relocInfo::relocType rtype = relocInfo::runtime_call_type
            NOT_AARCH64_ARG(AsmCondition cond = al)) {
    call(target, Relocation::spec_simple(rtype) NOT_AARCH64_ARG(cond));
  }

  void jump(AddressLiteral dest) {
    jump(dest.target(), dest.reloc());
  }
#ifndef AARCH64
  void jump(address dest, relocInfo::relocType rtype, AsmCondition cond) {
    jump(dest, rtype, Rtemp, cond);
  }
#endif

  void call(AddressLiteral dest) {
    call(dest.target(), dest.reloc());
  }

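// Illustrative sketch, not part of the original header: a non-patchable
// runtime call and a jump with an explicitly clobberable scratch register
// (hypothetical targets; on 32-bit ARM supplying the scratch enables far
// branches instead of the patchable default form):
//
//   __ call(CAST_FROM_FN_PTR(address, hypothetical_runtime_entry), relocInfo::runtime_call_type);
//   __ jump(hypothetical_stub_entry, relocInfo::runtime_call_type, Rtemp);
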
  // Patchable version:
  //   - set_destination can be used to atomically change the target
  //
  // The targets for patchable_jump and patchable_call must be in the
  // code cache.
  // [ including possible extensions of the code cache, like AOT code ]
  //
  // To reduce regression risk, scratch still defaults to noreg on
  // arm32. If a register can be clobbered, it should be explicitly
  // specified to allow future optimizations.
  void patchable_jump(address target,
                      relocInfo::relocType rtype = relocInfo::runtime_call_type,
                      Register scratch = AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg)
#ifndef AARCH64
                      , AsmCondition cond = al
#endif
                      );

  // patchable_call may scratch Rtemp
  int patchable_call(address target,
                     RelocationHolder const& rspec,
                     bool c2 = false);

  int patchable_call(address target,
                     relocInfo::relocType rtype,
                     bool c2 = false) {
    return patchable_call(target, Relocation::spec_simple(rtype), c2);
  }

#if defined(AARCH64) && defined(COMPILER2)
  static int call_size(address target, bool far, bool patchable);
#endif

#ifdef AARCH64
  static bool page_reachable_from_cache(address target);
#endif
  static bool _reachable_from_cache(address target);
  static bool _cache_fully_reachable();
  bool cache_fully_reachable();
  bool reachable_from_cache(address target);

  void zero_extend(Register rd, Register rn, int bits);
  void sign_extend(Register rd, Register rn, int bits);

  inline void zap_high_non_significant_bits(Register r) {
#ifdef AARCH64
    if (ZapHighNonSignificantBits) {
      movk(r, 0xBAAD, 48);
      movk(r, 0xF00D, 32);
    }
#endif
  }

#ifndef AARCH64
  void long_move(Register rd_lo, Register rd_hi,
                 Register rn_lo, Register rn_hi,
                 AsmCondition cond = al);
  void long_shift(Register rd_lo, Register rd_hi,
                  Register rn_lo, Register rn_hi,
                  AsmShift shift, Register count);
  void long_shift(Register rd_lo, Register rd_hi,
                  Register rn_lo, Register rn_hi,
                  AsmShift shift, int count);

  void atomic_cas(Register tmpreg1, Register tmpreg2, Register oldval, Register newval, Register base, int offset);
  void atomic_cas_bool(Register oldval, Register newval, Register base, int offset, Register tmpreg);
  void atomic_cas64(Register temp_lo, Register temp_hi, Register temp_result, Register oldval_lo, Register oldval_hi, Register newval_lo, Register newval_hi, Register base, int offset);
#endif // !AARCH64

  void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
  void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);

#ifndef PRODUCT
  // Preserves flags and all registers.
  // On SMP the updated value might not be visible to external observers without a synchronization barrier
  void cond_atomic_inc32(AsmCondition cond, int* counter_addr);
#endif // !PRODUCT

  // unconditional non-atomic increment
  void inc_counter(address counter_addr, Register tmpreg1, Register tmpreg2);
  void inc_counter(int* counter_addr, Register tmpreg1, Register tmpreg2) {
    inc_counter((address) counter_addr, tmpreg1, tmpreg2);
  }

  void pd_patch_instruction(address branch, address target);

  // Loading and storing values by size and signedness;
  // size must not exceed wordSize (i.e. 8-byte values are not supported on 32-bit ARM);
  // each of these calls generates exactly one load or store instruction,
  // so src can be a pre- or post-indexed address.
#ifdef AARCH64
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes);
#else
  // 32-bit ARM variants also support conditional execution
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, AsmCondition cond = al);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes, AsmCondition cond = al);
#endif

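// Illustrative sketch, not part of the original header: loading a 2-byte
// signed value and storing a 2-byte value through the sized helpers, with a
// hypothetical base register and field offset.
//
//   __ load_sized_value(R0, Address(Robj, field_offset), 2, true); // signed halfword load
//   __ store_sized_value(R0, Address(Robj, field_offset), 2);      // halfword store
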
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp_reg1,
                               Register temp_reg2,
                               Label& L_no_such_interface);

  // Compare char[] arrays aligned to 4 bytes.
  void char_arrays_equals(Register ary1, Register ary2,
                          Register limit, Register result,
                          Register chr1, Register chr2, Label& Ldone);


  void floating_cmp(Register dst);

  // improved x86 portability (minimizing source code changes)

  void ldr_literal(Register rd, AddressLiteral addr) {
    relocate(addr.rspec());
#ifdef AARCH64
    ldr(rd, addr.target());
#else
    ldr(rd, Address(PC, addr.target() - pc() - 8));
#endif
  }

  void lea(Register Rd, AddressLiteral addr) {
    // Never dereferenced, as on x86 (lval status ignored)
    mov_address(Rd, addr.target(), addr.rspec());
  }

  void restore_default_fp_mode();

#ifdef COMPILER2
#ifdef AARCH64
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  void fast_lock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3);
  void fast_unlock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3);
#else
  void fast_lock(Register obj, Register box, Register scratch, Register scratch2);
  void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
#endif
#endif

#ifdef AARCH64

#define F(mnemonic)                                    \
  void mnemonic(Register rt, address target) {         \
    Assembler::mnemonic(rt, target);                   \
  }                                                    \
  void mnemonic(Register rt, Label& L) {               \
    Assembler::mnemonic(rt, target(L));                \
  }

  F(cbz_w);
  F(cbnz_w);
  F(cbz);
  F(cbnz);

#undef F

#define F(mnemonic)                                    \
  void mnemonic(Register rt, int bit, address target) { \
    Assembler::mnemonic(rt, bit, target);              \
  }                                                    \
  void mnemonic(Register rt, int bit, Label& L) {      \
    Assembler::mnemonic(rt, bit, target(L));           \
  }

  F(tbz);
  F(tbnz);
#undef F

#endif // AARCH64

};


// The purpose of this class is to build several code fragments of the same size
// in order to allow fast table branch.

class FixedSizeCodeBlock {
 public:
  FixedSizeCodeBlock(MacroAssembler* masm, int size_in_instrs, bool enabled);
  ~FixedSizeCodeBlock();

 private:
  MacroAssembler* _masm;
  address _start;
  int _size_in_instrs;
  bool _enabled;
};

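// Illustrative sketch, not part of the original header: each table-branch
// entry is generated inside a FixedSizeCodeBlock scope so every entry ends up
// the same length (hypothetical fragment size of 8 instructions).
//
//   { FixedSizeCodeBlock fixed_block(_masm, 8, fixed_size_entries);
//     // ... emit one table entry ...
//   } // destructor pads (or checks) the fragment to 8 instructions when enabled
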

#endif // CPU_ARM_VM_MACROASSEMBLER_ARM_HPP