42664
|
1 |
/*
|
43969
|
2 |
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
|
42664
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
4 |
*
|
|
5 |
* This code is free software; you can redistribute it and/or modify it
|
|
6 |
* under the terms of the GNU General Public License version 2 only, as
|
|
7 |
* published by the Free Software Foundation.
|
|
8 |
*
|
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that
|
|
13 |
* accompanied this code).
|
|
14 |
*
|
|
15 |
* You should have received a copy of the GNU General Public License version
|
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
18 |
*
|
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
20 |
* or visit www.oracle.com if you need additional information or have any
|
|
21 |
* questions.
|
|
22 |
*
|
|
23 |
*/
|
|
24 |
|
|
25 |
#ifndef CPU_ARM_VM_MACROASSEMBLER_ARM_HPP
|
|
26 |
#define CPU_ARM_VM_MACROASSEMBLER_ARM_HPP
|
|
27 |
|
|
28 |
#include "code/relocInfo.hpp"
|
|
29 |
#include "code/relocInfo_ext.hpp"
|
|
30 |
|
|
31 |
class BiasedLockingCounters;
|
|
32 |
|
|
33 |
// Introduced AddressLiteral and its subclasses to ease portability from
|
|
34 |
// x86 and avoid relocation issues
|
|
35 |
class AddressLiteral VALUE_OBJ_CLASS_SPEC {
|
|
36 |
RelocationHolder _rspec;
|
|
37 |
// Typically we use AddressLiterals we want to use their rval
|
|
38 |
// However in some situations we want the lval (effect address) of the item.
|
|
39 |
// We provide a special factory for making those lvals.
|
|
40 |
bool _is_lval;
|
|
41 |
|
|
42 |
address _target;
|
|
43 |
|
|
44 |
private:
|
|
45 |
static relocInfo::relocType reloc_for_target(address target) {
|
|
46 |
// Used for ExternalAddress or when the type is not specified
|
|
47 |
// Sometimes ExternalAddress is used for values which aren't
|
|
48 |
// exactly addresses, like the card table base.
|
|
49 |
// external_word_type can't be used for values in the first page
|
|
50 |
// so just skip the reloc in that case.
|
|
51 |
return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
|
|
52 |
}
|
|
53 |
|
|
54 |
void set_rspec(relocInfo::relocType rtype);
|
|
55 |
|
|
56 |
protected:
|
|
57 |
// creation
|
|
58 |
AddressLiteral()
|
|
59 |
: _is_lval(false),
|
|
60 |
_target(NULL)
|
|
61 |
{}
|
|
62 |
|
|
63 |
public:
|
|
64 |
|
|
65 |
AddressLiteral(address target, relocInfo::relocType rtype) {
|
|
66 |
_is_lval = false;
|
|
67 |
_target = target;
|
|
68 |
set_rspec(rtype);
|
|
69 |
}
|
|
70 |
|
|
71 |
AddressLiteral(address target, RelocationHolder const& rspec)
|
|
72 |
: _rspec(rspec),
|
|
73 |
_is_lval(false),
|
|
74 |
_target(target)
|
|
75 |
{}
|
|
76 |
|
|
77 |
AddressLiteral(address target) {
|
|
78 |
_is_lval = false;
|
|
79 |
_target = target;
|
|
80 |
set_rspec(reloc_for_target(target));
|
|
81 |
}
|
|
82 |
|
|
83 |
AddressLiteral addr() {
|
|
84 |
AddressLiteral ret = *this;
|
|
85 |
ret._is_lval = true;
|
|
86 |
return ret;
|
|
87 |
}
|
|
88 |
|
|
89 |
private:
|
|
90 |
|
|
91 |
address target() { return _target; }
|
|
92 |
bool is_lval() { return _is_lval; }
|
|
93 |
|
|
94 |
relocInfo::relocType reloc() const { return _rspec.type(); }
|
|
95 |
const RelocationHolder& rspec() const { return _rspec; }
|
|
96 |
|
|
97 |
friend class Assembler;
|
|
98 |
friend class MacroAssembler;
|
|
99 |
friend class Address;
|
|
100 |
friend class LIR_Assembler;
|
|
101 |
friend class InlinedAddress;
|
|
102 |
};
|
|
103 |
|
|
104 |
// Literal for an address outside the generated code (runtime routines,
// VM globals).  Relocation is derived from the target by the
// AddressLiteral(address) constructor: external_word when the target
// can be relocated, none otherwise.
class ExternalAddress: public AddressLiteral {

 public:

  ExternalAddress(address target) : AddressLiteral(target) {}

};
|
|
111 |
|
|
112 |
// Literal for an address inside the generated code; always carries an
// internal_word relocation so the value is updated if the code moves.
class InternalAddress: public AddressLiteral {

 public:

  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}

};
|
|
119 |
|
|
120 |
// Inlined constants, for use with ldr_literal / bind_literal
|
|
121 |
// Note: InlinedInteger not supported (use move_slow(Register,int[,cond]))
|
|
122 |
// Base class for constants emitted inline in the code stream.  The label
// marks the location of the data word: ldr_literal loads through it and
// bind_literal binds it when the word is emitted.
class InlinedLiteral: StackObj {
 public:
  Label label; // need to be public for direct access with &
  InlinedLiteral() {
  }
};
|
|
128 |
|
|
129 |
class InlinedMetadata: public InlinedLiteral {
|
|
130 |
private:
|
|
131 |
Metadata *_data;
|
|
132 |
|
|
133 |
public:
|
|
134 |
InlinedMetadata(Metadata *data): InlinedLiteral() {
|
|
135 |
_data = data;
|
|
136 |
}
|
|
137 |
Metadata *data() { return _data; }
|
|
138 |
};
|
|
139 |
|
|
140 |
// Currently unused
|
|
141 |
// class InlinedOop: public InlinedLiteral {
|
|
142 |
// private:
|
|
143 |
// jobject _jobject;
|
|
144 |
//
|
|
145 |
// public:
|
|
146 |
// InlinedOop(jobject target): InlinedLiteral() {
|
|
147 |
// _jobject = target;
|
|
148 |
// }
|
|
149 |
// jobject jobject() { return _jobject; }
|
|
150 |
// };
|
|
151 |
|
|
152 |
// Plain address emitted as an inlined literal.  Oop and metadata targets
// are deliberately rejected (both at runtime via ShouldNotReachHere and
// at debug time via asserts) — use mov_oop / InlinedMetadata for those.
class InlinedAddress: public InlinedLiteral {
 private:
  AddressLiteral _literal;

 public:

  // Trap: oops must not be inlined through this class.
  InlinedAddress(jobject object): InlinedLiteral(), _literal((address)object, relocInfo::oop_type) {
    ShouldNotReachHere(); // use mov_oop (or implement InlinedOop)
  }

  // Trap: metadata must not be inlined through this class.
  InlinedAddress(Metadata *data): InlinedLiteral(), _literal((address)data, relocInfo::metadata_type) {
    ShouldNotReachHere(); // use InlinedMetadata or mov_metadata
  }

  InlinedAddress(address target, const RelocationHolder &rspec): InlinedLiteral(), _literal(target, rspec) {
    assert(rspec.type() != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rspec.type() != relocInfo::metadata_type, "Do not use InlinedAddress for metadatas");
  }

  InlinedAddress(address target, relocInfo::relocType rtype): InlinedLiteral(), _literal(target, rtype) {
    assert(rtype != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rtype != relocInfo::metadata_type, "Do not use InlinedAddress for metadatas");
  }

  // Note: default is relocInfo::none for InlinedAddress
  InlinedAddress(address target): InlinedLiteral(), _literal(target, relocInfo::none) {
  }

  address target() { return _literal.target(); }

  const RelocationHolder& rspec() const { return _literal.rspec(); }
};
|
|
184 |
|
|
185 |
class InlinedString: public InlinedLiteral {
|
|
186 |
private:
|
|
187 |
const char* _msg;
|
|
188 |
|
|
189 |
public:
|
|
190 |
InlinedString(const char* msg): InlinedLiteral() {
|
|
191 |
_msg = msg;
|
|
192 |
}
|
|
193 |
const char* msg() { return _msg; }
|
|
194 |
};
|
|
195 |
|
|
196 |
class MacroAssembler: public Assembler {
|
|
197 |
protected:
|
|
198 |
|
|
199 |
// Support for VM calls
|
|
200 |
//
|
|
201 |
|
|
202 |
// This is the base routine called by the different versions of call_VM_leaf.
|
|
203 |
void call_VM_leaf_helper(address entry_point, int number_of_arguments);
|
|
204 |
|
|
205 |
// This is the base routine called by the different versions of call_VM. The interpreter
|
|
206 |
// may customize this version by overriding it for its purposes (e.g., to save/restore
|
|
207 |
// additional registers when doing a VM call).
|
|
208 |
virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
|
|
209 |
|
|
210 |
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
|
|
211 |
// The implementation is only non-empty for the InterpreterMacroAssembler,
|
|
212 |
// as only the interpreter handles PopFrame and ForceEarlyReturn requests.
|
|
213 |
virtual void check_and_handle_popframe() {}
|
|
214 |
virtual void check_and_handle_earlyret() {}
|
|
215 |
|
|
216 |
public:
|
|
217 |
|
|
218 |
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
|
|
219 |
|
|
220 |
// By default, we do not need relocation information for non
|
|
221 |
// patchable absolute addresses. However, when needed by some
|
|
222 |
// extensions, ignore_non_patchable_relocations can be modified,
|
|
223 |
// returning false to preserve all relocation information.
|
|
224 |
inline bool ignore_non_patchable_relocations() { return true; }
|
|
225 |
|
|
226 |
// Initially added to the Assembler interface as a pure virtual:
|
|
227 |
// RegisterConstant delayed_value(..)
|
|
228 |
// for:
|
|
229 |
// 6812678 macro assembler needs delayed binding of a few constants (for 6655638)
|
|
230 |
// this was subsequently modified to its present name and return type
|
|
231 |
virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
|
|
232 |
|
|
233 |
#ifdef AARCH64
|
|
234 |
# define NOT_IMPLEMENTED() unimplemented("NYI at " __FILE__ ":" XSTR(__LINE__))
|
|
235 |
# define NOT_TESTED() warn("Not tested at " __FILE__ ":" XSTR(__LINE__))
|
|
236 |
#endif
|
|
237 |
|
|
238 |
void align(int modulus);
|
|
239 |
|
|
240 |
// Support for VM calls
|
|
241 |
//
|
|
242 |
// It is imperative that all calls into the VM are handled via the call_VM methods.
|
|
243 |
// They make sure that the stack linkage is setup correctly. call_VM's correspond
|
|
244 |
// to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
|
|
245 |
|
|
246 |
void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
|
|
247 |
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
|
|
248 |
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
|
|
249 |
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
|
|
250 |
|
|
251 |
// The following methods are required by templateTable.cpp,
|
|
252 |
// but not used on ARM.
|
|
253 |
void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
|
|
254 |
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
|
|
255 |
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
|
|
256 |
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
|
|
257 |
|
|
258 |
// Note: The super_call_VM calls are not used on ARM
|
|
259 |
|
|
260 |
// Raw call, without saving/restoring registers, exception handling, etc.
|
|
261 |
// Mainly used from various stubs.
|
|
262 |
// Note: if 'save_R9_if_scratched' is true, call_VM may on some
|
|
263 |
// platforms save values on the stack. Set it to false (and handle
|
|
264 |
// R9 in the callers) if the top of the stack must not be modified
|
|
265 |
// by call_VM.
|
|
266 |
void call_VM(address entry_point, bool save_R9_if_scratched);
|
|
267 |
|
|
268 |
void call_VM_leaf(address entry_point);
|
|
269 |
void call_VM_leaf(address entry_point, Register arg_1);
|
|
270 |
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
|
|
271 |
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
|
|
272 |
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
|
|
273 |
|
|
274 |
void get_vm_result(Register oop_result, Register tmp);
|
|
275 |
void get_vm_result_2(Register metadata_result, Register tmp);
|
|
276 |
|
|
277 |
// Always sets/resets sp, which default to SP if (last_sp == noreg)
|
|
278 |
// Optionally sets/resets fp (use noreg to avoid setting it)
|
|
279 |
// Always sets/resets pc on AArch64; optionally sets/resets pc on 32-bit ARM depending on save_last_java_pc flag
|
|
280 |
// Note: when saving PC, set_last_Java_frame returns PC's offset in the code section
|
|
281 |
// (for oop_maps offset computation)
|
|
282 |
int set_last_Java_frame(Register last_sp, Register last_fp, bool save_last_java_pc, Register tmp);
|
|
283 |
void reset_last_Java_frame(Register tmp);
|
|
284 |
// status set in set_last_Java_frame for reset_last_Java_frame
|
|
285 |
bool _fp_saved;
|
|
286 |
bool _pc_saved;
|
|
287 |
|
|
288 |
#ifdef PRODUCT
|
|
289 |
#define BLOCK_COMMENT(str) /* nothing */
|
|
290 |
#define STOP(error) __ stop(error)
|
|
291 |
#else
|
|
292 |
#define BLOCK_COMMENT(str) __ block_comment(str)
|
|
293 |
#define STOP(error) __ block_comment(error); __ stop(error)
|
|
294 |
#endif
|
|
295 |
|
|
296 |
void lookup_virtual_method(Register recv_klass,
|
|
297 |
Register vtable_index,
|
|
298 |
Register method_result);
|
|
299 |
|
|
300 |
// Test sub_klass against super_klass, with fast and slow paths.
|
|
301 |
|
|
302 |
// The fast path produces a tri-state answer: yes / no / maybe-slow.
|
|
303 |
// One of the three labels can be NULL, meaning take the fall-through.
|
|
304 |
// No registers are killed, except temp_regs.
|
|
305 |
void check_klass_subtype_fast_path(Register sub_klass,
|
|
306 |
Register super_klass,
|
|
307 |
Register temp_reg,
|
|
308 |
Register temp_reg2,
|
|
309 |
Label* L_success,
|
|
310 |
Label* L_failure,
|
|
311 |
Label* L_slow_path);
|
|
312 |
|
|
313 |
// The rest of the type check; must be wired to a corresponding fast path.
|
|
314 |
// It does not repeat the fast path logic, so don't use it standalone.
|
|
315 |
// temp_reg3 can be noreg, if no temps are available.
|
|
316 |
// Updates the sub's secondary super cache as necessary.
|
|
317 |
// If set_cond_codes:
|
|
318 |
// - condition codes will be Z on success, NZ on failure.
|
|
319 |
// - temp_reg will be 0 on success, non-0 on failure
|
|
320 |
void check_klass_subtype_slow_path(Register sub_klass,
|
|
321 |
Register super_klass,
|
|
322 |
Register temp_reg,
|
|
323 |
Register temp_reg2,
|
|
324 |
Register temp_reg3, // auto assigned if noreg
|
|
325 |
Label* L_success,
|
|
326 |
Label* L_failure,
|
|
327 |
bool set_cond_codes = false);
|
|
328 |
|
|
329 |
// Simplified, combined version, good for typical uses.
|
|
330 |
// temp_reg3 can be noreg, if no temps are available. It is used only on slow path.
|
|
331 |
// Falls through on failure.
|
|
332 |
void check_klass_subtype(Register sub_klass,
|
|
333 |
Register super_klass,
|
|
334 |
Register temp_reg,
|
|
335 |
Register temp_reg2,
|
|
336 |
Register temp_reg3, // auto assigned on slow path if noreg
|
|
337 |
Label& L_success);
|
|
338 |
|
|
339 |
// Returns address of receiver parameter, using tmp as base register. tmp and params_count can be the same.
|
|
340 |
Address receiver_argument_address(Register params_base, Register params_count, Register tmp);
|
|
341 |
|
|
342 |
void _verify_oop(Register reg, const char* s, const char* file, int line);
|
|
343 |
void _verify_oop_addr(Address addr, const char * s, const char* file, int line);
|
|
344 |
|
|
345 |
// TODO: verify method and klass metadata (compare against vptr?)
|
|
346 |
void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
|
|
347 |
void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}
|
|
348 |
|
|
349 |
#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
|
|
350 |
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop ", __FILE__, __LINE__)
|
|
351 |
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
|
|
352 |
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
|
|
353 |
|
|
354 |
void null_check(Register reg, Register tmp, int offset = -1);
|
|
355 |
inline void null_check(Register reg) { null_check(reg, noreg, -1); } // for C1 lir_null_check
|
|
356 |
|
|
357 |
// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
|
|
358 |
void eden_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
|
|
359 |
RegisterOrConstant size_expression, Label& slow_case);
|
|
360 |
void tlab_allocate(Register obj, Register obj_end, Register tmp1,
|
|
361 |
RegisterOrConstant size_expression, Label& slow_case);
|
|
362 |
|
|
363 |
void tlab_refill(Register top, Register tmp1, Register tmp2, Register tmp3, Register tmp4,
|
|
364 |
Label& try_eden, Label& slow_case);
|
|
365 |
void zero_memory(Register start, Register end, Register tmp);
|
|
366 |
|
|
367 |
void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register tmp);
|
|
368 |
|
|
369 |
static bool needs_explicit_null_check(intptr_t offset);
|
|
370 |
|
|
371 |
void arm_stack_overflow_check(int frame_size_in_bytes, Register tmp);
|
|
372 |
void arm_stack_overflow_check(Register Rsize, Register tmp);
|
|
373 |
|
|
374 |
  // Stack banging is not performed through this hook on ARM (see the
  // arm_stack_overflow_check variants above); reaching here is a misuse.
  void bang_stack_with_offset(int offset) {
    ShouldNotReachHere();
  }
|
|
377 |
|
|
378 |
// Biased locking support
|
|
379 |
// lock_reg and obj_reg must be loaded up with the appropriate values.
|
|
380 |
// swap_reg must be supplied.
|
|
381 |
// tmp_reg must be supplied.
|
|
382 |
// Optional slow case is for implementations (interpreter and C1) which branch to
|
|
383 |
// slow case directly. If slow_case is NULL, then leaves condition
|
|
384 |
// codes set (for C2's Fast_Lock node) and jumps to done label.
|
|
385 |
// Falls through for the fast locking attempt.
|
|
386 |
// Returns offset of first potentially-faulting instruction for null
|
|
387 |
// check info (currently consumed only by C1). If
|
|
388 |
// swap_reg_contains_mark is true then returns -1 as it is assumed
|
|
389 |
// the calling code has already passed any potential faults.
|
|
390 |
// Notes:
|
|
391 |
// - swap_reg and tmp_reg are scratched
|
|
392 |
// - Rtemp was (implicitly) scratched and can now be specified as the tmp2
|
|
393 |
int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
|
|
394 |
bool swap_reg_contains_mark,
|
|
395 |
Register tmp2,
|
|
396 |
Label& done, Label& slow_case,
|
|
397 |
BiasedLockingCounters* counters = NULL);
|
|
398 |
void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
|
|
399 |
|
|
400 |
// Building block for CAS cases of biased locking: makes CAS and records statistics.
|
|
401 |
// Optional slow_case label is used to transfer control if CAS fails. Otherwise leaves condition codes set.
|
|
402 |
void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
|
|
403 |
Register tmp, Label& slow_case, int* counter_addr);
|
|
404 |
|
|
405 |
#ifndef AARCH64
  // No-op encoded as a self-move (has no architectural effect).
  void nop() {
    mov(R0, R0);
  }

  // Pushes rd on the full-descending stack (store with pre-decrement).
  // rd == SP is architecturally unpredictable, hence the assert.
  void push(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    str(rd, Address(SP, -wordSize, pre_indexed), cond);
  }

  // Pushes a whole register set with a single stmdb (SP written back).
  void push(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    stmdb(SP, reg_set, writeback, cond);
  }

  // Pops the top of stack into rd (load with post-increment).
  void pop(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    ldr(rd, Address(SP, wordSize, post_indexed), cond);
  }

  // Pops a whole register set with a single ldmia (SP written back).
  void pop(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    ldmia(SP, reg_set, writeback, cond);
  }

  // Pushes a double-precision FP register.
  void fpushd(FloatRegister fd, AsmCondition cond = al) {
    fstmdbd(SP, FloatRegisterSet(fd), writeback, cond);
  }

  // Pushes a single-precision FP register.
  void fpushs(FloatRegister fd, AsmCondition cond = al) {
    fstmdbs(SP, FloatRegisterSet(fd), writeback, cond);
  }

  // Pops a double-precision FP register.
  void fpopd(FloatRegister fd, AsmCondition cond = al) {
    fldmiad(SP, FloatRegisterSet(fd), writeback, cond);
  }

  // Pops a single-precision FP register.
  void fpops(FloatRegister fd, AsmCondition cond = al) {
    fldmias(SP, FloatRegisterSet(fd), writeback, cond);
  }
#endif // !AARCH64
|
|
446 |
|
|
447 |
// Order access primitives
|
|
448 |
enum Membar_mask_bits {
|
|
449 |
StoreStore = 1 << 3,
|
|
450 |
LoadStore = 1 << 2,
|
|
451 |
StoreLoad = 1 << 1,
|
|
452 |
LoadLoad = 1 << 0
|
|
453 |
};
|
|
454 |
|
|
455 |
#ifdef AARCH64
|
|
456 |
// tmp register is not used on AArch64, this parameter is provided solely for better compatibility with 32-bit ARM
|
|
457 |
void membar(Membar_mask_bits order_constraint, Register tmp = noreg);
|
|
458 |
#else
|
|
459 |
void membar(Membar_mask_bits mask,
|
|
460 |
Register tmp,
|
|
461 |
bool preserve_flags = true,
|
|
462 |
Register load_tgt = noreg);
|
|
463 |
#endif
|
|
464 |
|
|
465 |
void breakpoint(AsmCondition cond = al);
|
|
466 |
void stop(const char* msg);
|
|
467 |
// prints msg and continues
|
|
468 |
void warn(const char* msg);
|
|
469 |
void unimplemented(const char* what = "");
|
|
470 |
void should_not_reach_here() { stop("should not reach here"); }
|
|
471 |
static void debug(const char* msg, const intx* registers);
|
|
472 |
|
|
473 |
// Create a walkable frame to help tracking down who called this code.
|
|
474 |
// Returns the frame size in words.
|
|
475 |
  int should_not_call_this() {
    raw_push(FP, LR);        // make the frame walkable for debugging
    should_not_reach_here(); // traps if this code is ever executed
    flush();
    return 2; // frame_size_in_words (FP+LR)
  }
|
|
481 |
|
|
482 |
int save_all_registers();
|
|
483 |
void restore_all_registers();
|
|
484 |
int save_caller_save_registers();
|
|
485 |
void restore_caller_save_registers();
|
|
486 |
|
|
487 |
void add_rc(Register dst, Register arg1, RegisterOrConstant arg2);
|
|
488 |
|
|
489 |
// add_slow and mov_slow are used to manipulate offsets larger than 1024,
|
|
490 |
// these functions are not expected to handle all possible constants,
|
|
491 |
// only those that can really occur during compilation
|
|
492 |
void add_slow(Register rd, Register rn, int c);
|
|
493 |
void sub_slow(Register rd, Register rn, int c);
|
|
494 |
|
|
495 |
#ifdef AARCH64
|
|
496 |
static int mov_slow_helper(Register rd, intptr_t c, MacroAssembler* masm /* optional */);
|
|
497 |
#endif
|
|
498 |
|
|
499 |
void mov_slow(Register rd, intptr_t c NOT_AARCH64_ARG(AsmCondition cond = al));
|
|
500 |
void mov_slow(Register rd, const char *string);
|
|
501 |
void mov_slow(Register rd, address addr);
|
|
502 |
|
|
503 |
  // mov_oop in its patchable form: AARCH64_ONLY_ARG passes patchable=true
  // on AArch64 only; on 32-bit ARM this is plain mov_oop.
  void patchable_mov_oop(Register rd, jobject o, int oop_index) {
    mov_oop(rd, o, oop_index AARCH64_ONLY_ARG(true));
  }
|
|
506 |
void mov_oop(Register rd, jobject o, int index = 0
|
|
507 |
AARCH64_ONLY_ARG(bool patchable = false)
|
|
508 |
NOT_AARCH64_ARG(AsmCondition cond = al));
|
|
509 |
|
|
510 |
|
|
511 |
  // mov_metadata in its patchable form: AARCH64_ONLY_ARG passes
  // patchable=true on AArch64 only; on 32-bit ARM this is plain mov_metadata.
  void patchable_mov_metadata(Register rd, Metadata* o, int index) {
    mov_metadata(rd, o, index AARCH64_ONLY_ARG(true));
  }
|
|
514 |
void mov_metadata(Register rd, Metadata* o, int index = 0 AARCH64_ONLY_ARG(bool patchable = false));
|
|
515 |
|
|
516 |
void mov_float(FloatRegister fd, jfloat c NOT_AARCH64_ARG(AsmCondition cond = al));
|
|
517 |
void mov_double(FloatRegister fd, jdouble c NOT_AARCH64_ARG(AsmCondition cond = al));
|
|
518 |
|
|
519 |
#ifdef AARCH64
|
|
520 |
int mov_pc_to(Register rd) {
|
|
521 |
Label L;
|
|
522 |
adr(rd, L);
|
|
523 |
bind(L);
|
|
524 |
return offset();
|
|
525 |
}
|
|
526 |
#endif
|
|
527 |
|
|
528 |
// Note: this variant of mov_address assumes the address moves with
|
|
529 |
// the code. Do *not* implement it with non-relocated instructions,
|
|
530 |
// unless PC-relative.
|
|
531 |
#ifdef AARCH64
  void mov_relative_address(Register rd, address addr) {
    adr(rd, addr);
  }
#else
  // Materializes addr as PC plus/minus a rotated-immediate offset.
  // Reading PC in ARM state yields the instruction address + 8, hence
  // the -8 bias; the asserts restrict addr to encodable distances.
  void mov_relative_address(Register rd, address addr, AsmCondition cond = al) {
    int offset = addr - pc() - 8;
    assert((offset & 3) == 0, "bad alignment");
    if (offset >= 0) {
      assert(AsmOperand::is_rotated_imm(offset), "addr too far");
      add(rd, PC, offset, cond);
    } else {
      assert(AsmOperand::is_rotated_imm(-offset), "addr too far");
      sub(rd, PC, -offset, cond);
    }
  }
#endif // AARCH64
|
|
548 |
|
|
549 |
// Runtime address that may vary from one execution to another. The
|
|
550 |
// symbolic_reference describes what the address is, allowing
|
|
551 |
// the address to be resolved in a different execution context.
|
|
552 |
// Warning: do not implement as a PC relative address.
|
|
553 |
  // Loads a runtime-varying address described by t.  The symbolic
  // reference is currently dropped: the call forwards with
  // RelocationHolder::none, i.e. the address is emitted as an absolute
  // value with no relocation.
  void mov_address(Register rd, address addr, symbolic_Relocation::symbolic_reference t) {
    mov_address(rd, addr, RelocationHolder::none);
  }

  // rspec can be RelocationHolder::none (for ignored symbolic_Relocation).
  // In that case, the address is absolute and the generated code need
  // not be relocable.
  void mov_address(Register rd, address addr, RelocationHolder const& rspec) {
    assert(rspec.type() != relocInfo::runtime_call_type, "do not use mov_address for runtime calls");
    assert(rspec.type() != relocInfo::static_call_type, "do not use mov_address for relocable calls");
    if (rspec.type() == relocInfo::none) {
      // absolute address, relocation not needed
      mov_slow(rd, (intptr_t)addr);
      return;
    }
#ifndef AARCH64
    if (VM_Version::supports_movw()) {
      // Patchable movw/movt pair: low 16 bits, then the high 16 bits
      // (the movt is skipped when the high half is zero).
      relocate(rspec);
      int c = (int)addr;
      movw(rd, c & 0xffff);
      if ((unsigned int)c >> 16) {
        movt(rd, (unsigned int)c >> 16);
      }
      return;
    }
#endif
    // Fallback: emit the address as an inlined literal in the code
    // stream, load it PC-relatively, and branch around the data word.
    Label skip_literal;
    InlinedAddress addr_literal(addr, rspec);
    ldr_literal(rd, addr_literal);
    b(skip_literal);
    bind_literal(addr_literal);
    // AARCH64 WARNING: because of alignment padding, extra padding
    // may be required to get a consistent size for C2, or rules must
    // overestimate size see MachEpilogNode::size
    bind(skip_literal);
  }
|
|
589 |
|
|
590 |
// Note: Do not define mov_address for a Label
|
|
591 |
//
|
|
592 |
// Load from addresses potentially within the code are now handled
|
|
593 |
// InlinedLiteral subclasses (to allow more flexibility on how the
|
|
594 |
// ldr_literal is performed).
|
|
595 |
|
|
596 |
  // Loads the value of an inlined address literal (the data word itself
  // is emitted by bind_literal).  Records the literal's relocation here.
  // On 32-bit ARM the load is PC-relative; the -8 accounts for PC
  // reading as instruction address + 8.
  void ldr_literal(Register rd, InlinedAddress& L) {
    assert(L.rspec().type() != relocInfo::runtime_call_type, "avoid ldr_literal for calls");
    assert(L.rspec().type() != relocInfo::static_call_type, "avoid ldr_literal for calls");
    relocate(L.rspec());
#ifdef AARCH64
    ldr(rd, target(L.label));
#else
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
#endif
  }

  // Loads the address of an inlined string.  Strings already placed in
  // the consts section are addressed directly (their address moves with
  // the code); otherwise the address is loaded through the literal's label.
  void ldr_literal(Register rd, InlinedString& L) {
    const char* msg = L.msg();
    if (code()->consts()->contains((address)msg)) {
      // string address moves with the code
#ifdef AARCH64
      ldr(rd, (address)msg);
#else
      ldr(rd, Address(PC, ((address)msg) - pc() - 8));
#endif
      return;
    }
    // Warning: use external strings with care. They are not relocated
    // if the code moves. If needed, use code_string to move them
    // to the consts section.
#ifdef AARCH64
    ldr(rd, target(L.label));
#else
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
#endif
  }

  // Loads an inlined metadata pointer.
  void ldr_literal(Register rd, InlinedMetadata& L) {
    // relocation done in the bind_literal for metadatas
#ifdef AARCH64
    ldr(rd, target(L.label));
#else
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
#endif
  }
|
|
636 |
|
|
637 |
  // Emits the data word for an inlined address literal and binds its
  // label (word-aligned on AArch64).
  void bind_literal(InlinedAddress& L) {
    AARCH64_ONLY(align(wordSize));
    bind(L.label);
    assert(L.rspec().type() != relocInfo::metadata_type, "Must use InlinedMetadata");
    // We currently do not use oop 'bound' literals.
    // If the code evolves and the following assert is triggered,
    // we need to implement InlinedOop (see InlinedMetadata).
    assert(L.rspec().type() != relocInfo::oop_type, "Inlined oops not supported");
    // Note: relocation is handled by relocate calls in ldr_literal
    AbstractAssembler::emit_address((address)L.target());
  }

  // Emits the data word for an inlined string literal.  Strings in the
  // consts section are addressed directly by ldr_literal, so no word is
  // emitted and the label is deliberately left unbound to catch misuse.
  void bind_literal(InlinedString& L) {
    const char* msg = L.msg();
    if (code()->consts()->contains((address)msg)) {
      // The Label should not be used; avoid binding it
      // to detect errors.
      return;
    }
    AARCH64_ONLY(align(wordSize));
    bind(L.label);
    AbstractAssembler::emit_address((address)L.msg());
  }

  // Emits the data word for an inlined metadata literal, recording the
  // metadata relocation here (ldr_literal does not relocate metadatas).
  void bind_literal(InlinedMetadata& L) {
    AARCH64_ONLY(align(wordSize));
    bind(L.label);
    relocate(metadata_Relocation::spec_for_immediate());
    AbstractAssembler::emit_address((address)L.data());
  }
|
|
667 |
|
|
668 |
void load_mirror(Register mirror, Register method, Register tmp);
|
|
669 |
|
|
670 |
// Porting layer between 32-bit ARM and AArch64
|
|
671 |
|
|
672 |
// Each COMMON_INSTR_n invocation below defines an n-argument method that
// expands to the AArch64 mnemonic or the 32-bit ARM mnemonic depending on
// which build this is (AARCH64_ONLY / NOT_AARCH64 select exactly one).
#define COMMON_INSTR_1(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg_type) \
void common_mnemonic(arg_type arg) { \
  AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg); \
}

#define COMMON_INSTR_2(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg1_type, arg2_type) \
void common_mnemonic(arg1_type arg1, arg2_type arg2) { \
  AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg1, arg2); \
}

#define COMMON_INSTR_3(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg1_type, arg2_type, arg3_type) \
void common_mnemonic(arg1_type arg1, arg2_type arg2, arg3_type arg3) { \
  AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg1, arg2, arg3); \
}

// Register-indirect branch and call.
COMMON_INSTR_1(jump, br, bx, Register)
COMMON_INSTR_1(call, blr, blx, Register)

// Compare-and-branch on (non-)zero, 32-bit view of the register.
COMMON_INSTR_2(cbz_32, cbz_w, cbz, Register, Label&)
COMMON_INSTR_2(cbnz_32, cbnz_w, cbnz, Register, Label&)

// 32-bit loads/stores (zero- or sign-extending on AArch64).
COMMON_INSTR_2(ldr_u32, ldr_w, ldr, Register, Address)
COMMON_INSTR_2(ldr_s32, ldrsw, ldr, Register, Address)
COMMON_INSTR_2(str_32, str_w, str, Register, Address)

// 32-bit register-register ALU operations.
COMMON_INSTR_2(mvn_32, mvn_w, mvn, Register, Register)
COMMON_INSTR_2(cmp_32, cmp_w, cmp, Register, Register)
COMMON_INSTR_2(neg_32, neg_w, neg, Register, Register)
COMMON_INSTR_2(clz_32, clz_w, clz, Register, Register)
COMMON_INSTR_2(rbit_32, rbit_w, rbit, Register, Register)

// Compare / compare-negated against an immediate.
COMMON_INSTR_2(cmp_32, cmp_w, cmp, Register, int)
COMMON_INSTR_2(cmn_32, cmn_w, cmn, Register, int)

COMMON_INSTR_3(add_32, add_w, add, Register, Register, Register)
COMMON_INSTR_3(sub_32, sub_w, sub, Register, Register, Register)
COMMON_INSTR_3(subs_32, subs_w, subs, Register, Register, Register)
COMMON_INSTR_3(mul_32, mul_w, mul, Register, Register, Register)
COMMON_INSTR_3(and_32, andr_w, andr, Register, Register, Register)
COMMON_INSTR_3(orr_32, orr_w, orr, Register, Register, Register)
COMMON_INSTR_3(eor_32, eor_w, eor, Register, Register, Register)

// Shifted-operand forms.
COMMON_INSTR_3(add_32, add_w, add, Register, Register, AsmOperand)
COMMON_INSTR_3(sub_32, sub_w, sub, Register, Register, AsmOperand)
COMMON_INSTR_3(orr_32, orr_w, orr, Register, Register, AsmOperand)
COMMON_INSTR_3(eor_32, eor_w, eor, Register, Register, AsmOperand)
COMMON_INSTR_3(and_32, andr_w, andr, Register, Register, AsmOperand)


// Immediate-operand forms.
COMMON_INSTR_3(add_32, add_w, add, Register, Register, int)
COMMON_INSTR_3(adds_32, adds_w, adds, Register, Register, int)
COMMON_INSTR_3(sub_32, sub_w, sub, Register, Register, int)
COMMON_INSTR_3(subs_32, subs_w, subs, Register, Register, int)

COMMON_INSTR_2(tst_32, tst_w, tst, Register, unsigned int)
COMMON_INSTR_2(tst_32, tst_w, tst, Register, AsmOperand)

COMMON_INSTR_3(and_32, andr_w, andr, Register, Register, uint)
COMMON_INSTR_3(orr_32, orr_w, orr, Register, Register, uint)
COMMON_INSTR_3(eor_32, eor_w, eor, Register, Register, uint)

// Floating-point compare against zero.
COMMON_INSTR_1(cmp_zero_float, fcmp0_s, fcmpzs, FloatRegister)
COMMON_INSTR_1(cmp_zero_double, fcmp0_d, fcmpzd, FloatRegister)

// Single-precision operations.
COMMON_INSTR_2(ldr_float, ldr_s, flds, FloatRegister, Address)
COMMON_INSTR_2(str_float, str_s, fsts, FloatRegister, Address)
COMMON_INSTR_2(mov_float, fmov_s, fcpys, FloatRegister, FloatRegister)
COMMON_INSTR_2(neg_float, fneg_s, fnegs, FloatRegister, FloatRegister)
COMMON_INSTR_2(abs_float, fabs_s, fabss, FloatRegister, FloatRegister)
COMMON_INSTR_2(sqrt_float, fsqrt_s, fsqrts, FloatRegister, FloatRegister)
COMMON_INSTR_2(cmp_float, fcmp_s, fcmps, FloatRegister, FloatRegister)

COMMON_INSTR_3(add_float, fadd_s, fadds, FloatRegister, FloatRegister, FloatRegister)
COMMON_INSTR_3(sub_float, fsub_s, fsubs, FloatRegister, FloatRegister, FloatRegister)
COMMON_INSTR_3(mul_float, fmul_s, fmuls, FloatRegister, FloatRegister, FloatRegister)
COMMON_INSTR_3(div_float, fdiv_s, fdivs, FloatRegister, FloatRegister, FloatRegister)

// Double-precision operations.
COMMON_INSTR_2(ldr_double, ldr_d, fldd, FloatRegister, Address)
COMMON_INSTR_2(str_double, str_d, fstd, FloatRegister, Address)
COMMON_INSTR_2(mov_double, fmov_d, fcpyd, FloatRegister, FloatRegister)
COMMON_INSTR_2(neg_double, fneg_d, fnegd, FloatRegister, FloatRegister)
COMMON_INSTR_2(cmp_double, fcmp_d, fcmpd, FloatRegister, FloatRegister)
COMMON_INSTR_2(abs_double, fabs_d, fabsd, FloatRegister, FloatRegister)
COMMON_INSTR_2(sqrt_double, fsqrt_d, fsqrtd, FloatRegister, FloatRegister)

COMMON_INSTR_3(add_double, fadd_d, faddd, FloatRegister, FloatRegister, FloatRegister)
COMMON_INSTR_3(sub_double, fsub_d, fsubd, FloatRegister, FloatRegister, FloatRegister)
COMMON_INSTR_3(mul_double, fmul_d, fmuld, FloatRegister, FloatRegister, FloatRegister)
COMMON_INSTR_3(div_double, fdiv_d, fdivd, FloatRegister, FloatRegister, FloatRegister)

// Precision conversions.
COMMON_INSTR_2(convert_f2d, fcvt_ds, fcvtds, FloatRegister, FloatRegister)
COMMON_INSTR_2(convert_d2f, fcvt_sd, fcvtsd, FloatRegister, FloatRegister)

// Move a single-precision value from an FP register to a GP register.
COMMON_INSTR_2(mov_fpr2gpr_float, fmov_ws, fmrs, Register, FloatRegister)

#undef COMMON_INSTR_1
#undef COMMON_INSTR_2
#undef COMMON_INSTR_3
|
|
770 |
|
|
771 |
|
|
772 |
#ifdef AARCH64
|
|
773 |
|
|
774 |
// Conditional register move (AArch64): dst = src if cond holds, else dst
// is left unchanged. An unconditional condition degenerates to a plain mov.
void mov(Register dst, Register src, AsmCondition cond) {
  if (cond != al) {
    // csel keeps dst when cond does not hold: dst = cond ? src : dst.
    csel(dst, src, dst, cond);
    return;
  }
  mov(dst, src);
}
|
|
781 |
|
|
782 |
// Propagate other overloaded "mov" methods from Assembler: the overloads
// declared in this class would otherwise hide them (C++ name hiding).
void mov(Register dst, Register src) { Assembler::mov(dst, src); }
void mov(Register rd, int imm) { Assembler::mov(rd, imm); }
|
|
785 |
|
|
786 |
// Conditional move of a small immediate (AArch64). Only 0, 1 and -1 are
// representable with the CSEL family without a scratch register:
//   imm ==  0: move ZR under cond
//   imm ==  1: csinc -> dst = cond ? ZR+1 : dst
//   imm == -1: csinv -> dst = cond ? ~ZR  : dst
void mov(Register dst, int imm, AsmCondition cond) {
  // The assert previously allowed only 0 and 1, contradicting the
  // imm == -1 branch below (which it made unreachable in debug builds).
  assert(imm == 0 || imm == 1 || imm == -1, "unsupported conditional mov immediate");
  if (imm == 0) {
    mov(dst, ZR, cond);
  } else if (imm == 1) {
    csinc(dst, dst, ZR, inverse(cond));
  } else if (imm == -1) {
    csinv(dst, dst, ZR, inverse(cond));
  } else {
    // Product-build safety net for unsupported immediates.
    fatal("illegal mov(R%d,%d,cond)", dst->encoding(), imm);
  }
}
|
|
798 |
|
|
799 |
// AArch64 has no flag-setting register move; emulate ARM32 "movs" with a
// flag-setting add of zero.
void movs(Register dst, Register src) { adds(dst, src, 0); }
|
|
800 |
|
|
801 |
#else // AARCH64
|
|
802 |
|
|
803 |
// Emulation of AArch64 tbz for 32-bit ARM: branch to L if bit 'bit' of rt
// is clear. NOTE: unlike the real tbz instruction, this clobbers the
// condition flags.
void tbz(Register rt, int bit, Label& L) {
  assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
  // Form the mask in unsigned arithmetic: "1 << 31" on a 32-bit int is
  // signed-overflow undefined behavior in C++.
  tst(rt, (int)(1u << bit));
  b(L, eq);
}
|
|
808 |
|
|
809 |
// Emulation of AArch64 tbnz for 32-bit ARM: branch to L if bit 'bit' of rt
// is set. NOTE: unlike the real tbnz instruction, this clobbers the
// condition flags.
void tbnz(Register rt, int bit, Label& L) {
  assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
  // Form the mask in unsigned arithmetic: "1 << 31" on a 32-bit int is
  // signed-overflow undefined behavior in C++.
  tst(rt, (int)(1u << bit));
  b(L, ne);
}
|
|
814 |
|
|
815 |
// cbz/cbnz emulation for 32-bit ARM. Unlike the AArch64 instructions,
// these clobber the condition flags.
void cbz(Register rt, Label& L) {
  cmp(rt, 0);
  b(L, eq);
}

void cbz(Register rt, address target) {
  cmp(rt, 0);
  b(target, eq);
}

void cbnz(Register rt, Label& L) {
  cmp(rt, 0);
  b(L, ne);
}

// Return: branch to the address held in dst (the link register by default).
void ret(Register dst = LR) {
  bx(dst);
}
|
|
833 |
|
|
834 |
#endif // AARCH64
|
|
835 |
|
|
836 |
// Returns a register holding zero: the hardwired ZR on AArch64 (no code
// emitted, tmp unused), otherwise materializes 0 into tmp and returns tmp.
Register zero_register(Register tmp) {
#ifdef AARCH64
  return ZR;
#else
  mov(tmp, 0);
  return tmp;
#endif
}
|
|
844 |
|
|
845 |
// Immediate-shift helpers. On AArch64 these use the dedicated shift
// instructions; on 32-bit ARM they are expressed as a mov with a shifted
// operand. The *_32 variants operate on the 32-bit (w) view on AArch64;
// on 32-bit ARM all registers are 32 bits, so they alias the plain forms.
void logical_shift_left(Register dst, Register src, int shift) {
#ifdef AARCH64
  _lsl(dst, src, shift);
#else
  mov(dst, AsmOperand(src, lsl, shift));
#endif
}

void logical_shift_left_32(Register dst, Register src, int shift) {
#ifdef AARCH64
  _lsl_w(dst, src, shift);
#else
  mov(dst, AsmOperand(src, lsl, shift));
#endif
}

void logical_shift_right(Register dst, Register src, int shift) {
#ifdef AARCH64
  _lsr(dst, src, shift);
#else
  mov(dst, AsmOperand(src, lsr, shift));
#endif
}

void arith_shift_right(Register dst, Register src, int shift) {
#ifdef AARCH64
  _asr(dst, src, shift);
#else
  mov(dst, AsmOperand(src, asr, shift));
#endif
}

void asr_32(Register dst, Register src, int shift) {
#ifdef AARCH64
  _asr_w(dst, src, shift);
#else
  mov(dst, AsmOperand(src, asr, shift));
#endif
}
|
|
884 |
|
|
885 |
// If <cond> holds, compares r1 and r2. Otherwise, flags are set so that <cond> does not hold.
void cond_cmp(Register r1, Register r2, AsmCondition cond) {
#ifdef AARCH64
  // ccmp performs the compare only when cond holds; otherwise it loads the
  // NZCV flags for the inverse condition, so <cond> evaluates to false.
  ccmp(r1, r2, flags_for_condition(inverse(cond)), cond);
#else
  // 32-bit ARM: a conditionally-executed cmp leaves flags untouched when
  // cond does not hold, which already means <cond> stays false.
  cmp(r1, r2, cond);
#endif
}

// If <cond> holds, compares r and imm. Otherwise, flags are set so that <cond> does not hold.
void cond_cmp(Register r, int imm, AsmCondition cond) {
#ifdef AARCH64
  ccmp(r, imm, flags_for_condition(inverse(cond)), cond);
#else
  cmp(r, imm, cond);
#endif
}
|
|
902 |
|
|
903 |
// dst = src rounded down to a multiple of align (a power of two), i.e.
// the low log2(align) bits are cleared.
void align_reg(Register dst, Register src, int align) {
  assert (is_power_of_2(align), "should be");
#ifdef AARCH64
  andr(dst, src, ~(uintx)(align-1));
#else
  bic(dst, src, align-1);  // bit-clear the low bits
#endif
}
|
|
911 |
|
|
912 |
// Hints the CPU to prefetch the data at addr for a subsequent read.
void prefetch_read(Address addr) {
#ifdef AARCH64
  prfm(pldl1keep, addr);
#else
  pld(addr);
#endif
}
|
|
919 |
|
|
920 |
// Raw SP-relative push/pop of register pairs/triples, with no frame
// bookkeeping. On AArch64, stp/ldp with pre/post-indexing moves SP by a
// multiple of 16 bytes (keeping it 16-byte aligned as required); on
// 32-bit ARM, RegisterSet push/pop stores the lowest-numbered register at
// the lowest address, hence the encoding-order asserts.
void raw_push(Register r1, Register r2) {
#ifdef AARCH64
  stp(r1, r2, Address(SP, -2*wordSize, pre_indexed));
#else
  assert(r1->encoding() < r2->encoding(), "should be ordered");
  push(RegisterSet(r1) | RegisterSet(r2));
#endif
}

void raw_pop(Register r1, Register r2) {
#ifdef AARCH64
  ldp(r1, r2, Address(SP, 2*wordSize, post_indexed));
#else
  assert(r1->encoding() < r2->encoding(), "should be ordered");
  pop(RegisterSet(r1) | RegisterSet(r2));
#endif
}

void raw_push(Register r1, Register r2, Register r3) {
#ifdef AARCH64
  raw_push(r1, r2);
  raw_push(r3, ZR);  // ZR filler word keeps SP 16-byte aligned
#else
  assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
  push(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
#endif
}

void raw_pop(Register r1, Register r2, Register r3) {
#ifdef AARCH64
  raw_pop(r3, ZR);   // reverse order of raw_push(r1, r2, r3)
  raw_pop(r1, r2);
#else
  assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
  pop(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
#endif
}

// Restores registers r1 and r2 previously saved by raw_push(r1, r2, ret_addr) and returns by ret_addr. Clobbers LR.
void raw_pop_and_ret(Register r1, Register r2) {
#ifdef AARCH64
  raw_pop(r1, r2, LR);
  ret();
#else
  raw_pop(r1, r2, PC);  // popping the return address into PC returns directly
#endif
}
|
|
967 |
|
|
968 |
// Jumps to the address stored at addr. On 32-bit ARM a load into PC
// branches directly (scratch unused); AArch64 loads into scratch and br's.
void indirect_jump(Address addr, Register scratch) {
#ifdef AARCH64
  ldr(scratch, addr);
  br(scratch);
#else
  ldr(PC, addr);
#endif
}

// Same, but the jump target is an inlined address literal (bound via
// bind_literal) loaded PC-relatively.
void indirect_jump(InlinedAddress& literal, Register scratch) {
#ifdef AARCH64
  ldr_literal(scratch, literal);
  br(scratch);
#else
  ldr_literal(PC, literal);
#endif
}
|
|
985 |
|
|
986 |
#ifndef AARCH64
// dst = -src. 32-bit ARM has no dedicated neg instruction; use a
// reverse-subtract from zero.
void neg(Register dst, Register src) {
  rsb(dst, src, 0);
}
#endif
|
|
991 |
|
|
992 |
// Branches to L if the 32-bit value in r is negative. Clobbers the flags.
void branch_if_negative_32(Register r, Label& L) {
  // Note about branch_if_negative_32() / branch_if_any_negative_32() implementation for AArch64:
  // tbnz is not used instead of tst & b.mi because destination may be out of tbnz range (+-32KB)
  // since these methods are used in LIR_Assembler::emit_arraycopy() to jump to stub entry.
  tst_32(r, r);  // sets N from the sign bit of r
  b(L, mi);
}

// Branches to L if r1 or r2 is negative (as 32-bit values). Clobbers tmp
// and the flags.
void branch_if_any_negative_32(Register r1, Register r2, Register tmp, Label& L) {
#ifdef AARCH64
  orr_32(tmp, r1, r2);
  tst_32(tmp, tmp);
#else
  orrs(tmp, r1, r2);  // flag-setting orr: N set if either sign bit is set
#endif
  b(L, mi);
}

// Three-register variant of the above. Clobbers tmp and the flags.
void branch_if_any_negative_32(Register r1, Register r2, Register r3, Register tmp, Label& L) {
  orr_32(tmp, r1, r2);
#ifdef AARCH64
  orr_32(tmp, tmp, r3);
  tst_32(tmp, tmp);
#else
  orrs(tmp, tmp, r3);
#endif
  b(L, mi);
}
|
|
1020 |
|
|
1021 |
// dst = r1 + (r2 << shift), where r2 holds a 32-bit int index; on AArch64
// the index is sign-extended to 64 bits (sxtw extend) before scaling.
void add_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
#ifdef AARCH64
  add(dst, r1, r2, ex_sxtw, shift);
#else
  add(dst, r1, AsmOperand(r2, lsl, shift));
#endif
}

// dst = r1 - (r2 << shift), with the same sign-extension rules as above.
void sub_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
#ifdef AARCH64
  sub(dst, r1, r2, ex_sxtw, shift);
#else
  sub(dst, r1, AsmOperand(r2, lsl, shift));
#endif
}
|
|
1036 |
|
|
1037 |
|
|
1038 |
// klass oop manipulations if compressed
|
|
1039 |
|
|
1040 |
#ifdef AARCH64
|
|
1041 |
void load_klass(Register dst_klass, Register src_oop);
|
|
1042 |
#else
|
|
1043 |
void load_klass(Register dst_klass, Register src_oop, AsmCondition cond = al);
|
|
1044 |
#endif // AARCH64
|
|
1045 |
|
|
1046 |
void store_klass(Register src_klass, Register dst_oop);
|
|
1047 |
|
|
1048 |
#ifdef AARCH64
|
|
1049 |
void store_klass_gap(Register dst);
|
|
1050 |
#endif // AARCH64
|
|
1051 |
|
|
1052 |
// oop manipulations
|
|
1053 |
|
|
1054 |
void load_heap_oop(Register dst, Address src);
|
|
1055 |
void store_heap_oop(Register src, Address dst);
|
|
1056 |
// Argument-order convenience overload (x86-style (dst, src)); forwards to
// store_heap_oop(Register src, Address dst).
void store_heap_oop(Address dst, Register src) {
  store_heap_oop(src, dst);
}
|
|
1059 |
void store_heap_oop_null(Register src, Address dst);
|
|
1060 |
|
|
1061 |
#ifdef AARCH64
|
|
1062 |
void encode_heap_oop(Register dst, Register src);
|
|
1063 |
// In-place variant: encodes the heap oop held in r into r itself.
void encode_heap_oop(Register r) {
  encode_heap_oop(r, r);
}
void decode_heap_oop(Register dst, Register src);
// In-place variant: decodes the narrow oop held in r into r itself.
void decode_heap_oop(Register r) {
  decode_heap_oop(r, r);
}
|
|
1070 |
|
|
1071 |
#ifdef COMPILER2
|
|
1072 |
void encode_heap_oop_not_null(Register dst, Register src);
|
|
1073 |
void decode_heap_oop_not_null(Register dst, Register src);
|
|
1074 |
|
|
1075 |
void set_narrow_klass(Register dst, Klass* k);
|
|
1076 |
void set_narrow_oop(Register dst, jobject obj);
|
|
1077 |
#endif
|
|
1078 |
|
|
1079 |
void encode_klass_not_null(Register r);
|
|
1080 |
void encode_klass_not_null(Register dst, Register src);
|
|
1081 |
void decode_klass_not_null(Register r);
|
|
1082 |
void decode_klass_not_null(Register dst, Register src);
|
|
1083 |
|
|
1084 |
void reinit_heapbase();
|
|
1085 |
|
|
1086 |
#ifdef ASSERT
|
|
1087 |
void verify_heapbase(const char* msg);
|
|
1088 |
#endif // ASSERT
|
|
1089 |
|
|
1090 |
static int instr_count_for_mov_slow(intptr_t c);
|
|
1091 |
static int instr_count_for_mov_slow(address addr);
|
|
1092 |
static int instr_count_for_decode_klass_not_null();
|
|
1093 |
#endif // AARCH64
|
|
1094 |
|
|
1095 |
void ldr_global_ptr(Register reg, address address_of_global);
|
|
1096 |
void ldr_global_s32(Register reg, address address_of_global);
|
|
1097 |
void ldrb_global(Register reg, address address_of_global);
|
|
1098 |
|
|
1099 |
// address_placeholder_instruction is invalid instruction and is used
|
|
1100 |
// as placeholder in code for address of label
|
|
1101 |
enum { address_placeholder_instruction = 0xFFFFFFFF };
|
|
1102 |
|
|
1103 |
// Emits a word-sized placeholder slot for the (not yet known) address of
// label L; the relocation created by target(L) patches it in later.
void emit_address(Label& L) {
  assert(!L.is_bound(), "otherwise address will not be patched");
  target(L); // creates relocation which will be patched later

  assert ((offset() & (wordSize-1)) == 0, "should be aligned by word size");

#ifdef AARCH64
  // Two 32-bit placeholder words form one 64-bit address slot.
  emit_int32(address_placeholder_instruction);
  emit_int32(address_placeholder_instruction);
#else
  AbstractAssembler::emit_address((address)address_placeholder_instruction);
#endif
}
|
|
1116 |
|
|
1117 |
// Branch to an absolute address, optionally conditional.
// (A stray line-continuation backslash, left over from macro-generated
// code, has been removed from the statement below.)
void b(address target, AsmCondition cond = al) {
  Assembler::b(target, cond);
}
|
|
1120 |
// Branch to a label within the code being generated.
void b(Label& L, AsmCondition cond = al) {
  // internal jumps
  Assembler::b(target(L), cond);
}

// Branch-and-link (call) to an absolute address; a condition is only
// available on 32-bit ARM (NOT_AARCH64_ARG drops it on AArch64).
void bl(address target NOT_AARCH64_ARG(AsmCondition cond = al)) {
  Assembler::bl(target NOT_AARCH64_ARG(cond));
}
// Branch-and-link to a label within the code being generated.
void bl(Label& L NOT_AARCH64_ARG(AsmCondition cond = al)) {
  // internal calls
  Assembler::bl(target(L) NOT_AARCH64_ARG(cond));
}
|
|
1132 |
|
|
1133 |
#ifndef AARCH64
|
|
1134 |
// Materializes the address of label L into dest, PC-relative.
// On 32-bit ARM, reading PC yields the current instruction address + 8
// (pipeline effect), hence the -8 correction below.
void adr(Register dest, Label& L, AsmCondition cond = al) {
  const int offset = target(L) - pc() - 8;
  if (offset < 0) {
    sub(dest, PC, -offset, cond);
  } else {
    add(dest, PC, offset, cond);
  }
}
|
|
1142 |
#endif // !AARCH64
|
|
1143 |
|
|
1144 |
// Variable-length jump and calls. We now distinguish only the
|
|
1145 |
// patchable case from the other cases. Patchable must be
|
|
1146 |
// distinguised from relocable. Relocable means the generated code
|
|
1147 |
// containing the jump/call may move. Patchable means that the
|
|
1148 |
// targeted address may be changed later.
|
|
1149 |
|
|
1150 |
// Non patchable versions.
|
|
1151 |
// - used only for relocInfo::runtime_call_type and relocInfo::none
|
|
1152 |
// - may use relative or absolute format (do not use relocInfo::none
|
|
1153 |
// if the generated code may move)
|
|
1154 |
// - the implementation takes into account switch to THUMB mode if the
|
|
1155 |
// destination is a THUMB address
|
|
1156 |
// - the implementation supports far targets
|
|
1157 |
//
|
|
1158 |
// To reduce regression risk, scratch still defaults to noreg on
|
|
1159 |
// arm32. This results in patchable instructions. However, if
|
|
1160 |
// patching really matters, the call sites should be modified and
|
|
1161 |
// use patchable_call or patchable_jump. If patching is not required
|
|
1162 |
// and if a register can be cloberred, it should be explicitly
|
|
1163 |
// specified to allow future optimizations.
|
|
1164 |
void jump(address target,
|
|
1165 |
relocInfo::relocType rtype = relocInfo::runtime_call_type,
|
|
1166 |
Register scratch = AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg)
|
|
1167 |
#ifndef AARCH64
|
|
1168 |
, AsmCondition cond = al
|
|
1169 |
#endif
|
|
1170 |
);
|
|
1171 |
|
|
1172 |
void call(address target,
|
|
1173 |
RelocationHolder rspec
|
|
1174 |
NOT_AARCH64_ARG(AsmCondition cond = al));
|
|
1175 |
|
|
1176 |
// Non-patchable call taking a relocation type instead of a full
// RelocationHolder; forwards to the RelocationHolder overload above.
void call(address target,
          relocInfo::relocType rtype = relocInfo::runtime_call_type
          NOT_AARCH64_ARG(AsmCondition cond = al)) {
  call(target, Relocation::spec_simple(rtype) NOT_AARCH64_ARG(cond));
}
|
|
1181 |
|
|
1182 |
// AddressLiteral convenience wrappers (x86-style portability helpers).
void jump(AddressLiteral dest) {
  jump(dest.target(), dest.reloc());
}
#ifndef AARCH64
// Conditional jump; uses Rtemp as the scratch register.
void jump(address dest, relocInfo::relocType rtype, AsmCondition cond) {
  jump(dest, rtype, Rtemp, cond);
}
#endif

void call(AddressLiteral dest) {
  call(dest.target(), dest.reloc());
}
|
|
1194 |
|
|
1195 |
// Patchable version:
|
|
1196 |
// - set_destination can be used to atomically change the target
|
|
1197 |
//
|
|
1198 |
// The targets for patchable_jump and patchable_call must be in the
|
|
1199 |
// code cache.
|
|
1200 |
// [ including possible extensions of the code cache, like AOT code ]
|
|
1201 |
//
|
|
1202 |
// To reduce regression risk, scratch still defaults to noreg on
|
|
1203 |
// arm32. If a register can be cloberred, it should be explicitly
|
|
1204 |
// specified to allow future optimizations.
|
|
1205 |
void patchable_jump(address target,
|
|
1206 |
relocInfo::relocType rtype = relocInfo::runtime_call_type,
|
|
1207 |
Register scratch = AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg)
|
|
1208 |
#ifndef AARCH64
|
|
1209 |
, AsmCondition cond = al
|
|
1210 |
#endif
|
|
1211 |
);
|
|
1212 |
|
|
1213 |
// patchable_call may scratch Rtemp
|
|
1214 |
int patchable_call(address target,
|
|
1215 |
RelocationHolder const& rspec,
|
|
1216 |
bool c2 = false);
|
|
1217 |
|
|
1218 |
// Convenience overload: builds a simple relocation spec from rtype and
// forwards to the RelocationHolder overload (which may scratch Rtemp).
int patchable_call(address target,
                   relocInfo::relocType rtype,
                   bool c2 = false) {
  return patchable_call(target, Relocation::spec_simple(rtype), c2);
}
|
|
1223 |
|
|
1224 |
#if defined(AARCH64) && defined(COMPILER2)
|
|
1225 |
static int call_size(address target, bool far, bool patchable);
|
|
1226 |
#endif
|
|
1227 |
|
|
1228 |
#ifdef AARCH64
|
|
1229 |
static bool page_reachable_from_cache(address target);
|
|
1230 |
#endif
|
|
1231 |
static bool _reachable_from_cache(address target);
|
|
1232 |
static bool _cache_fully_reachable();
|
|
1233 |
bool cache_fully_reachable();
|
|
1234 |
bool reachable_from_cache(address target);
|
|
1235 |
|
|
1236 |
void zero_extend(Register rd, Register rn, int bits);
|
|
1237 |
void sign_extend(Register rd, Register rn, int bits);
|
|
1238 |
|
|
1239 |
// Debugging aid (AArch64 only): when ZapHighNonSignificantBits is set,
// poisons the upper 32 bits of r with the 0xBAADF00D pattern so that
// accidental reliance on the high half is noticed. No-op on 32-bit ARM.
inline void zap_high_non_significant_bits(Register r) {
#ifdef AARCH64
  if(ZapHighNonSignificantBits) {
    movk(r, 0xBAAD, 48);
    movk(r, 0xF00D, 32);
  }
#endif
}
|
|
1247 |
|
|
1248 |
#ifndef AARCH64
|
|
1249 |
void long_move(Register rd_lo, Register rd_hi,
|
|
1250 |
Register rn_lo, Register rn_hi,
|
|
1251 |
AsmCondition cond = al);
|
|
1252 |
void long_shift(Register rd_lo, Register rd_hi,
|
|
1253 |
Register rn_lo, Register rn_hi,
|
|
1254 |
AsmShift shift, Register count);
|
|
1255 |
void long_shift(Register rd_lo, Register rd_hi,
|
|
1256 |
Register rn_lo, Register rn_hi,
|
|
1257 |
AsmShift shift, int count);
|
|
1258 |
|
|
1259 |
void atomic_cas(Register tmpreg1, Register tmpreg2, Register oldval, Register newval, Register base, int offset);
|
|
1260 |
void atomic_cas_bool(Register oldval, Register newval, Register base, int offset, Register tmpreg);
|
|
1261 |
void atomic_cas64(Register temp_lo, Register temp_hi, Register temp_result, Register oldval_lo, Register oldval_hi, Register newval_lo, Register newval_hi, Register base, int offset);
|
|
1262 |
#endif // !AARCH64
|
|
1263 |
|
|
1264 |
void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
|
|
1265 |
void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
|
|
1266 |
|
|
1267 |
#ifndef PRODUCT
|
|
1268 |
// Preserves flags and all registers.
|
|
1269 |
// On SMP the updated value might not be visible to external observers without a sychronization barrier
|
|
1270 |
void cond_atomic_inc32(AsmCondition cond, int* counter_addr);
|
|
1271 |
#endif // !PRODUCT
|
|
1272 |
|
|
1273 |
// unconditional non-atomic increment
|
|
1274 |
void inc_counter(address counter_addr, Register tmpreg1, Register tmpreg2);
|
|
1275 |
// Typed convenience overload; see inc_counter(address, ...) above.
// Non-atomic: see the comment on the address overload.
void inc_counter(int* counter_addr, Register tmpreg1, Register tmpreg2) {
  inc_counter((address) counter_addr, tmpreg1, tmpreg2);
}
|
|
1278 |
|
|
1279 |
void pd_patch_instruction(address branch, address target);
|
|
1280 |
|
|
1281 |
// Loading and storing values by size and signed-ness;
|
|
1282 |
// size must not exceed wordSize (i.e. 8-byte values are not supported on 32-bit ARM);
|
|
1283 |
// each of these calls generates exactly one load or store instruction,
|
|
1284 |
// so src can be pre- or post-indexed address.
|
|
1285 |
#ifdef AARCH64
|
|
1286 |
void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
|
|
1287 |
void store_sized_value(Register src, Address dst, size_t size_in_bytes);
|
|
1288 |
#else
|
|
1289 |
// 32-bit ARM variants also support conditional execution
|
|
1290 |
void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, AsmCondition cond = al);
|
|
1291 |
void store_sized_value(Register src, Address dst, size_t size_in_bytes, AsmCondition cond = al);
|
|
1292 |
#endif
|
|
1293 |
|
|
1294 |
void lookup_interface_method(Register recv_klass,
|
|
1295 |
Register intf_klass,
|
|
1296 |
Register itable_index,
|
|
1297 |
Register method_result,
|
|
1298 |
Register temp_reg1,
|
|
1299 |
Register temp_reg2,
|
|
1300 |
Label& L_no_such_interface);
|
|
1301 |
|
|
1302 |
// Compare char[] arrays aligned to 4 bytes.
|
|
1303 |
void char_arrays_equals(Register ary1, Register ary2,
|
|
1304 |
Register limit, Register result,
|
|
1305 |
Register chr1, Register chr2, Label& Ldone);
|
|
1306 |
|
|
1307 |
|
|
1308 |
void floating_cmp(Register dst);
|
|
1309 |
|
|
1310 |
// improved x86 portability (minimizing source code changes)
|
|
1311 |
|
|
1312 |
// Loads the value stored at the literal's address into rd, recording the
// literal's relocation first. On 32-bit ARM this is a PC-relative load
// (PC reads as current instruction + 8), so the literal must be in range.
void ldr_literal(Register rd, AddressLiteral addr) {
  relocate(addr.rspec());
#ifdef AARCH64
  ldr(rd, addr.target());
#else
  ldr(rd, Address(PC, addr.target() - pc() - 8));
#endif
}
|
|
1320 |
|
|
1321 |
// Loads the literal's address itself (not its contents) into Rd.
void lea(Register Rd, AddressLiteral addr) {
  // Never dereferenced, as on x86 (lval status ignored)
  mov_address(Rd, addr.target(), addr.rspec());
}
|
|
1325 |
|
|
1326 |
void restore_default_fp_mode();
|
|
1327 |
|
|
1328 |
#ifdef COMPILER2
|
|
1329 |
#ifdef AARCH64
|
|
1330 |
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
|
|
1331 |
void fast_lock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3);
|
|
1332 |
void fast_unlock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3);
|
|
1333 |
#else
|
|
1334 |
void fast_lock(Register obj, Register box, Register scratch, Register scratch2);
|
|
1335 |
void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
|
|
1336 |
#endif
|
|
1337 |
#endif
|
|
1338 |
|
|
1339 |
#ifdef AARCH64
|
|
1340 |
|
|
1341 |
// AArch64 compare-and-branch wrappers: add Label& overloads on top of the
// address-taking Assembler versions.
#define F(mnemonic) \
void mnemonic(Register rt, address target) { \
  Assembler::mnemonic(rt, target); \
} \
void mnemonic(Register rt, Label& L) { \
  Assembler::mnemonic(rt, target(L)); \
}

F(cbz_w);
F(cbnz_w);
F(cbz);
F(cbnz);

#undef F

// Test-bit-and-branch wrappers, same pattern with an extra bit number.
#define F(mnemonic) \
void mnemonic(Register rt, int bit, address target) { \
  Assembler::mnemonic(rt, bit, target); \
} \
void mnemonic(Register rt, int bit, Label& L) { \
  Assembler::mnemonic(rt, bit, target(L)); \
}

F(tbz);
F(tbnz);
#undef F
|
|
1367 |
|
|
1368 |
#endif // AARCH64
|
|
1369 |
|
|
1370 |
};
|
|
1371 |
|
|
1372 |
|
|
1373 |
// The purpose of this class is to build several code fragments of the same size
|
|
1374 |
// in order to allow fast table branch.
|
|
1375 |
|
|
1376 |
class FixedSizeCodeBlock VALUE_OBJ_CLASS_SPEC {
public:
  // Marks the start of a fragment that should occupy exactly
  // size_in_instrs instructions; 'enabled' turns the constraint on or off.
  FixedSizeCodeBlock(MacroAssembler* masm, int size_in_instrs, bool enabled);
  // NOTE(review): implementation not visible here; presumably enforces or
  // pads the fragment to the fixed size when enabled — confirm in the .cpp.
  ~FixedSizeCodeBlock();

private:
  MacroAssembler* _masm;   // assembler the fragment is emitted into
  address _start;          // code position captured at construction
  int _size_in_instrs;     // required fragment size, in instructions
  bool _enabled;           // whether the fixed-size constraint applies
};
|
|
1387 |
|
|
1388 |
|
|
1389 |
#endif // CPU_ARM_VM_MACROASSEMBLER_ARM_HPP
|
|
1390 |
|