author | jwilhelm |
Thu, 12 Sep 2019 03:21:11 +0200 | |
changeset 58094 | 0f6c749acd15 |
parent 53686 | 3047cf8c3bc2 |
permissions | -rw-r--r-- |
42664 | 1 |
/*
 * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
|
24 |
||
53244
9807daeb47c4
8216167: Update include guards to reflect correct directories
coleenp
parents:
53061
diff
changeset
|
25 |
#ifndef CPU_ARM_MACROASSEMBLER_ARM_HPP |
9807daeb47c4
8216167: Update include guards to reflect correct directories
coleenp
parents:
53061
diff
changeset
|
26 |
#define CPU_ARM_MACROASSEMBLER_ARM_HPP |
42664 | 27 |
|
28 |
#include "code/relocInfo.hpp" |
|
29 |
||
30 |
class BiasedLockingCounters; |
|
31 |
||
32 |
// Introduced AddressLiteral and its subclasses to ease portability from |
|
33 |
// x86 and avoid relocation issues |
|
49364
601146c66cad
8173070: Remove ValueObj class for allocation subclassing for runtime code
coleenp
parents:
49010
diff
changeset
|
34 |
class AddressLiteral { |
42664 | 35 |
RelocationHolder _rspec; |
36 |
// Typically we use AddressLiterals we want to use their rval |
|
37 |
// However in some situations we want the lval (effect address) of the item. |
|
38 |
// We provide a special factory for making those lvals. |
|
39 |
bool _is_lval; |
|
40 |
||
41 |
address _target; |
|
42 |
||
43 |
private: |
|
44 |
static relocInfo::relocType reloc_for_target(address target) { |
|
45 |
// Used for ExternalAddress or when the type is not specified |
|
46 |
// Sometimes ExternalAddress is used for values which aren't |
|
47 |
// exactly addresses, like the card table base. |
|
48 |
// external_word_type can't be used for values in the first page |
|
49 |
// so just skip the reloc in that case. |
|
50 |
return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none; |
|
51 |
} |
|
52 |
||
53 |
void set_rspec(relocInfo::relocType rtype); |
|
54 |
||
55 |
protected: |
|
56 |
// creation |
|
57 |
AddressLiteral() |
|
58 |
: _is_lval(false), |
|
59 |
_target(NULL) |
|
60 |
{} |
|
61 |
||
62 |
public: |
|
63 |
||
64 |
AddressLiteral(address target, relocInfo::relocType rtype) { |
|
65 |
_is_lval = false; |
|
66 |
_target = target; |
|
67 |
set_rspec(rtype); |
|
68 |
} |
|
69 |
||
70 |
AddressLiteral(address target, RelocationHolder const& rspec) |
|
71 |
: _rspec(rspec), |
|
72 |
_is_lval(false), |
|
73 |
_target(target) |
|
74 |
{} |
|
75 |
||
76 |
AddressLiteral(address target) { |
|
77 |
_is_lval = false; |
|
78 |
_target = target; |
|
79 |
set_rspec(reloc_for_target(target)); |
|
80 |
} |
|
81 |
||
82 |
AddressLiteral addr() { |
|
83 |
AddressLiteral ret = *this; |
|
84 |
ret._is_lval = true; |
|
85 |
return ret; |
|
86 |
} |
|
87 |
||
88 |
private: |
|
89 |
||
90 |
address target() { return _target; } |
|
91 |
bool is_lval() { return _is_lval; } |
|
92 |
||
93 |
relocInfo::relocType reloc() const { return _rspec.type(); } |
|
94 |
const RelocationHolder& rspec() const { return _rspec; } |
|
95 |
||
96 |
friend class Assembler; |
|
97 |
friend class MacroAssembler; |
|
98 |
friend class Address; |
|
99 |
friend class LIR_Assembler; |
|
100 |
friend class InlinedAddress; |
|
101 |
}; |
|
102 |
||
103 |
class ExternalAddress: public AddressLiteral { |
|
104 |
||
105 |
public: |
|
106 |
||
107 |
ExternalAddress(address target) : AddressLiteral(target) {} |
|
108 |
||
109 |
}; |
|
110 |
||
111 |
class InternalAddress: public AddressLiteral { |
|
112 |
||
113 |
public: |
|
114 |
||
115 |
InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {} |
|
116 |
||
117 |
}; |
|
118 |
||
119 |
// Inlined constants, for use with ldr_literal / bind_literal.
// Note: InlinedInteger not supported (use move_slow(Register,int[,cond]))
class InlinedLiteral: StackObj {
 public:
  Label label; // bound at the literal's location; need to be public for direct access with &
  InlinedLiteral() {
  }
};
|
127 |
||
128 |
class InlinedMetadata: public InlinedLiteral { |
|
129 |
private: |
|
130 |
Metadata *_data; |
|
131 |
||
132 |
public: |
|
133 |
InlinedMetadata(Metadata *data): InlinedLiteral() { |
|
134 |
_data = data; |
|
135 |
} |
|
136 |
Metadata *data() { return _data; } |
|
137 |
}; |
|
138 |
||
139 |
// Currently unused |
|
140 |
// class InlinedOop: public InlinedLiteral { |
|
141 |
// private: |
|
142 |
// jobject _jobject; |
|
143 |
// |
|
144 |
// public: |
|
145 |
// InlinedOop(jobject target): InlinedLiteral() { |
|
146 |
// _jobject = target; |
|
147 |
// } |
|
148 |
// jobject jobject() { return _jobject; } |
|
149 |
// }; |
|
150 |
||
151 |
// Inlined literal carrying a raw address plus its relocation.
// Oop and metadata values are deliberately rejected (see the
// ShouldNotReachHere/assert guards below).
class InlinedAddress: public InlinedLiteral {
 private:
  AddressLiteral _literal;

 public:

  // Rejected at runtime: oops must go through mov_oop (or a future InlinedOop).
  InlinedAddress(jobject object): InlinedLiteral(), _literal((address)object, relocInfo::oop_type) {
    ShouldNotReachHere(); // use mov_oop (or implement InlinedOop)
  }

  // Rejected at runtime: metadata must go through InlinedMetadata or mov_metadata.
  InlinedAddress(Metadata *data): InlinedLiteral(), _literal((address)data, relocInfo::metadata_type) {
    ShouldNotReachHere(); // use InlinedMetadata or mov_metadata
  }

  InlinedAddress(address target, const RelocationHolder &rspec): InlinedLiteral(), _literal(target, rspec) {
    assert(rspec.type() != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rspec.type() != relocInfo::metadata_type, "Do not use InlinedAddress for metadatas");
  }

  InlinedAddress(address target, relocInfo::relocType rtype): InlinedLiteral(), _literal(target, rtype) {
    assert(rtype != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rtype != relocInfo::metadata_type, "Do not use InlinedAddress for metadatas");
  }

  // Note: default is relocInfo::none for InlinedAddress
  InlinedAddress(address target): InlinedLiteral(), _literal(target, relocInfo::none) {
  }

  address target() { return _literal.target(); }

  const RelocationHolder& rspec() const { return _literal.rspec(); }
};
|
183 |
||
184 |
class InlinedString: public InlinedLiteral { |
|
185 |
private: |
|
186 |
const char* _msg; |
|
187 |
||
188 |
public: |
|
189 |
InlinedString(const char* msg): InlinedLiteral() { |
|
190 |
_msg = msg; |
|
191 |
} |
|
192 |
const char* msg() { return _msg; } |
|
193 |
}; |
|
194 |
||
195 |
class MacroAssembler: public Assembler { |
|
196 |
protected: |
|
197 |
||
198 |
// Support for VM calls |
|
199 |
// |
|
200 |
||
201 |
// This is the base routine called by the different versions of call_VM_leaf. |
|
202 |
void call_VM_leaf_helper(address entry_point, int number_of_arguments); |
|
203 |
||
204 |
// This is the base routine called by the different versions of call_VM. The interpreter |
|
205 |
// may customize this version by overriding it for its purposes (e.g., to save/restore |
|
206 |
// additional registers when doing a VM call). |
|
207 |
virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions); |
|
46294
345a46524a19
8172020: Internal Error (cpu/arm/vm/frame_arm.cpp:571): assert(obj == __null || Universe::heap()->is_in(obj)) failed: sanity check #
cjplummer
parents:
42664
diff
changeset
|
208 |
public: |
345a46524a19
8172020: Internal Error (cpu/arm/vm/frame_arm.cpp:571): assert(obj == __null || Universe::heap()->is_in(obj)) failed: sanity check #
cjplummer
parents:
42664
diff
changeset
|
209 |
|
345a46524a19
8172020: Internal Error (cpu/arm/vm/frame_arm.cpp:571): assert(obj == __null || Universe::heap()->is_in(obj)) failed: sanity check #
cjplummer
parents:
42664
diff
changeset
|
210 |
MacroAssembler(CodeBuffer* code) : Assembler(code) {} |
42664 | 211 |
|
212 |
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. |
|
213 |
// The implementation is only non-empty for the InterpreterMacroAssembler, |
|
214 |
// as only the interpreter handles PopFrame and ForceEarlyReturn requests. |
|
215 |
virtual void check_and_handle_popframe() {} |
|
216 |
virtual void check_and_handle_earlyret() {} |
|
217 |
||
218 |
// By default, we do not need relocation information for non |
|
219 |
// patchable absolute addresses. However, when needed by some |
|
220 |
// extensions, ignore_non_patchable_relocations can be modified, |
|
221 |
// returning false to preserve all relocation information. |
|
222 |
inline bool ignore_non_patchable_relocations() { return true; } |
|
223 |
||
224 |
// Initially added to the Assembler interface as a pure virtual: |
|
225 |
// RegisterConstant delayed_value(..) |
|
226 |
// for: |
|
227 |
// 6812678 macro assembler needs delayed binding of a few constants (for 6655638) |
|
228 |
// this was subsequently modified to its present name and return type |
|
229 |
virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset); |
|
230 |
||
231 |
||
232 |
void align(int modulus); |
|
233 |
||
234 |
// Support for VM calls |
|
235 |
// |
|
236 |
// It is imperative that all calls into the VM are handled via the call_VM methods. |
|
237 |
// They make sure that the stack linkage is setup correctly. call_VM's correspond |
|
238 |
// to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. |
|
239 |
||
240 |
void call_VM(Register oop_result, address entry_point, bool check_exceptions = true); |
|
241 |
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true); |
|
242 |
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); |
|
243 |
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); |
|
244 |
||
245 |
// The following methods are required by templateTable.cpp, |
|
246 |
// but not used on ARM. |
|
247 |
void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); |
|
248 |
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); |
|
249 |
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); |
|
250 |
void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); |
|
251 |
||
252 |
// Note: The super_call_VM calls are not used on ARM |
|
253 |
||
254 |
// Raw call, without saving/restoring registers, exception handling, etc. |
|
255 |
// Mainly used from various stubs. |
|
256 |
// Note: if 'save_R9_if_scratched' is true, call_VM may on some |
|
257 |
// platforms save values on the stack. Set it to false (and handle |
|
258 |
// R9 in the callers) if the top of the stack must not be modified |
|
259 |
// by call_VM. |
|
260 |
void call_VM(address entry_point, bool save_R9_if_scratched); |
|
261 |
||
262 |
void call_VM_leaf(address entry_point); |
|
263 |
void call_VM_leaf(address entry_point, Register arg_1); |
|
264 |
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2); |
|
265 |
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); |
|
266 |
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4); |
|
267 |
||
268 |
void get_vm_result(Register oop_result, Register tmp); |
|
269 |
void get_vm_result_2(Register metadata_result, Register tmp); |
|
270 |
||
271 |
// Always sets/resets sp, which default to SP if (last_sp == noreg) |
|
272 |
// Optionally sets/resets fp (use noreg to avoid setting it) |
|
52351 | 273 |
// Optionally sets/resets pc depending on save_last_java_pc flag |
42664 | 274 |
// Note: when saving PC, set_last_Java_frame returns PC's offset in the code section |
275 |
// (for oop_maps offset computation) |
|
276 |
int set_last_Java_frame(Register last_sp, Register last_fp, bool save_last_java_pc, Register tmp); |
|
277 |
void reset_last_Java_frame(Register tmp); |
|
278 |
// status set in set_last_Java_frame for reset_last_Java_frame |
|
279 |
bool _fp_saved; |
|
280 |
bool _pc_saved; |
|
281 |
||
282 |
#ifdef PRODUCT |
|
283 |
#define BLOCK_COMMENT(str) /* nothing */ |
|
284 |
#define STOP(error) __ stop(error) |
|
285 |
#else |
|
286 |
#define BLOCK_COMMENT(str) __ block_comment(str) |
|
287 |
#define STOP(error) __ block_comment(error); __ stop(error) |
|
288 |
#endif |
|
289 |
||
290 |
void lookup_virtual_method(Register recv_klass, |
|
291 |
Register vtable_index, |
|
292 |
Register method_result); |
|
293 |
||
294 |
// Test sub_klass against super_klass, with fast and slow paths. |
|
295 |
||
296 |
// The fast path produces a tri-state answer: yes / no / maybe-slow. |
|
297 |
// One of the three labels can be NULL, meaning take the fall-through. |
|
298 |
// No registers are killed, except temp_regs. |
|
299 |
void check_klass_subtype_fast_path(Register sub_klass, |
|
300 |
Register super_klass, |
|
301 |
Register temp_reg, |
|
302 |
Register temp_reg2, |
|
303 |
Label* L_success, |
|
304 |
Label* L_failure, |
|
305 |
Label* L_slow_path); |
|
306 |
||
307 |
// The rest of the type check; must be wired to a corresponding fast path. |
|
308 |
// It does not repeat the fast path logic, so don't use it standalone. |
|
309 |
// temp_reg3 can be noreg, if no temps are available. |
|
310 |
// Updates the sub's secondary super cache as necessary. |
|
311 |
// If set_cond_codes: |
|
312 |
// - condition codes will be Z on success, NZ on failure. |
|
313 |
// - temp_reg will be 0 on success, non-0 on failure |
|
314 |
void check_klass_subtype_slow_path(Register sub_klass, |
|
315 |
Register super_klass, |
|
316 |
Register temp_reg, |
|
317 |
Register temp_reg2, |
|
318 |
Register temp_reg3, // auto assigned if noreg |
|
319 |
Label* L_success, |
|
320 |
Label* L_failure, |
|
321 |
bool set_cond_codes = false); |
|
322 |
||
323 |
// Simplified, combined version, good for typical uses. |
|
324 |
// temp_reg3 can be noreg, if no temps are available. It is used only on slow path. |
|
325 |
// Falls through on failure. |
|
326 |
void check_klass_subtype(Register sub_klass, |
|
327 |
Register super_klass, |
|
328 |
Register temp_reg, |
|
329 |
Register temp_reg2, |
|
330 |
Register temp_reg3, // auto assigned on slow path if noreg |
|
331 |
Label& L_success); |
|
332 |
||
333 |
// Returns address of receiver parameter, using tmp as base register. tmp and params_count can be the same. |
|
334 |
Address receiver_argument_address(Register params_base, Register params_count, Register tmp); |
|
335 |
||
336 |
void _verify_oop(Register reg, const char* s, const char* file, int line); |
|
337 |
void _verify_oop_addr(Address addr, const char * s, const char* file, int line); |
|
338 |
||
339 |
// TODO: verify method and klass metadata (compare against vptr?) |
|
340 |
void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} |
|
341 |
void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {} |
|
342 |
||
343 |
#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__) |
|
344 |
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop ", __FILE__, __LINE__) |
|
345 |
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__) |
|
346 |
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__) |
|
347 |
||
348 |
void null_check(Register reg, Register tmp, int offset = -1); |
|
349 |
inline void null_check(Register reg) { null_check(reg, noreg, -1); } // for C1 lir_null_check |
|
350 |
||
351 |
// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`. |
|
352 |
void eden_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2, |
|
353 |
RegisterOrConstant size_expression, Label& slow_case); |
|
354 |
void tlab_allocate(Register obj, Register obj_end, Register tmp1, |
|
355 |
RegisterOrConstant size_expression, Label& slow_case); |
|
356 |
||
357 |
void zero_memory(Register start, Register end, Register tmp); |
|
358 |
||
359 |
static bool needs_explicit_null_check(intptr_t offset); |
|
52462
4ad404da0088
8213199: GC abstraction for Assembler::needs_explicit_null_check()
rkennke
parents:
52351
diff
changeset
|
360 |
static bool uses_implicit_null_check(void* address); |
42664 | 361 |
|
362 |
void arm_stack_overflow_check(int frame_size_in_bytes, Register tmp); |
|
363 |
void arm_stack_overflow_check(Register Rsize, Register tmp); |
|
364 |
||
365 |
// Offset-based stack banging is not supported in this port (the
// arm_stack_overflow_check variants above are used instead); aborts
// if ever reached.
void bang_stack_with_offset(int offset) {
  ShouldNotReachHere();
}
|
368 |
||
369 |
// Biased locking support |
|
370 |
// lock_reg and obj_reg must be loaded up with the appropriate values. |
|
371 |
// swap_reg must be supplied. |
|
372 |
// tmp_reg must be supplied. |
|
53061
5da72d7e0e80
8214512: ARM32: Jtreg test compiler/c2/Test8062950.java fails on ARM
dlong
parents:
52676
diff
changeset
|
373 |
// Done label is branched to with condition code EQ set if the lock is |
5da72d7e0e80
8214512: ARM32: Jtreg test compiler/c2/Test8062950.java fails on ARM
dlong
parents:
52676
diff
changeset
|
374 |
// biased and we acquired it. Slow case label is branched to with |
5da72d7e0e80
8214512: ARM32: Jtreg test compiler/c2/Test8062950.java fails on ARM
dlong
parents:
52676
diff
changeset
|
375 |
// condition code NE set if the lock is biased but we failed to acquire |
5da72d7e0e80
8214512: ARM32: Jtreg test compiler/c2/Test8062950.java fails on ARM
dlong
parents:
52676
diff
changeset
|
376 |
// it. Otherwise fall through. |
42664 | 377 |
// Returns offset of first potentially-faulting instruction for null |
378 |
// check info (currently consumed only by C1). If |
|
379 |
// swap_reg_contains_mark is true then returns -1 as it is assumed |
|
380 |
// the calling code has already passed any potential faults. |
|
381 |
// Notes: |
|
382 |
// - swap_reg and tmp_reg are scratched |
|
383 |
// - Rtemp was (implicitly) scratched and can now be specified as the tmp2 |
|
384 |
int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg, |
|
385 |
bool swap_reg_contains_mark, |
|
386 |
Register tmp2, |
|
387 |
Label& done, Label& slow_case, |
|
388 |
BiasedLockingCounters* counters = NULL); |
|
389 |
void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done); |
|
390 |
||
391 |
// Building block for CAS cases of biased locking: makes CAS and records statistics. |
|
392 |
// Optional slow_case label is used to transfer control if CAS fails. Otherwise leaves condition codes set. |
|
393 |
void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg, |
|
394 |
Register tmp, Label& slow_case, int* counter_addr); |
|
395 |
||
44406
a46a6c4d1dd9
8176100: [REDO][REDO] G1 Needs pre barrier on dereference of weak JNI handles
mgerdin
parents:
44093
diff
changeset
|
396 |
void resolve_jobject(Register value, Register tmp1, Register tmp2); |
a46a6c4d1dd9
8176100: [REDO][REDO] G1 Needs pre barrier on dereference of weak JNI handles
mgerdin
parents:
44093
diff
changeset
|
397 |
|
42664 | 398 |
// No-op: a mov of R0 to itself.
void nop() {
  mov(R0, R0);
}

// Push a single register, pre-decrementing SP by one word.
// SP itself may not be pushed ("unpredictable instruction" per the assert).
void push(Register rd, AsmCondition cond = al) {
  assert(rd != SP, "unpredictable instruction");
  str(rd, Address(SP, -wordSize, pre_indexed), cond);
}

// Push a register set with a single stmdb (descending, writeback).
void push(RegisterSet reg_set, AsmCondition cond = al) {
  assert(!reg_set.contains(SP), "unpredictable instruction");
  stmdb(SP, reg_set, writeback, cond);
}

// Pop a single register, post-incrementing SP by one word.
void pop(Register rd, AsmCondition cond = al) {
  assert(rd != SP, "unpredictable instruction");
  ldr(rd, Address(SP, wordSize, post_indexed), cond);
}

// Pop a register set with a single ldmia (ascending, writeback).
void pop(RegisterSet reg_set, AsmCondition cond = al) {
  assert(!reg_set.contains(SP), "unpredictable instruction");
  ldmia(SP, reg_set, writeback, cond);
}

// Push a double-precision FP register.
void fpushd(FloatRegister fd, AsmCondition cond = al) {
  fstmdbd(SP, FloatRegisterSet(fd), writeback, cond);
}

// Push a single-precision FP register.
void fpushs(FloatRegister fd, AsmCondition cond = al) {
  fstmdbs(SP, FloatRegisterSet(fd), writeback, cond);
}

// Pop a double-precision FP register.
void fpopd(FloatRegister fd, AsmCondition cond = al) {
  fldmiad(SP, FloatRegisterSet(fd), writeback, cond);
}

// Pop a single-precision FP register.
void fpops(FloatRegister fd, AsmCondition cond = al) {
  fldmias(SP, FloatRegisterSet(fd), writeback, cond);
}
|
437 |
||
438 |
// Order access primitives |
|
439 |
enum Membar_mask_bits { |
|
440 |
StoreStore = 1 << 3, |
|
441 |
LoadStore = 1 << 2, |
|
442 |
StoreLoad = 1 << 1, |
|
443 |
LoadLoad = 1 << 0 |
|
444 |
}; |
|
445 |
||
446 |
void membar(Membar_mask_bits mask, |
|
447 |
Register tmp, |
|
448 |
bool preserve_flags = true, |
|
449 |
Register load_tgt = noreg); |
|
450 |
||
451 |
void breakpoint(AsmCondition cond = al); |
|
452 |
void stop(const char* msg); |
|
453 |
// prints msg and continues |
|
454 |
void warn(const char* msg); |
|
455 |
void unimplemented(const char* what = ""); |
|
456 |
void should_not_reach_here() { stop("should not reach here"); } |
|
457 |
static void debug(const char* msg, const intx* registers); |
|
458 |
||
459 |
// Create a walkable frame to help tracking down who called this code.
// Returns the frame size in words.
int should_not_call_this() {
  raw_push(FP, LR);        // make the frame walkable
  should_not_reach_here();
  flush();
  return 2; // frame_size_in_words (FP+LR)
}
|
467 |
||
468 |
int save_all_registers(); |
|
469 |
void restore_all_registers(); |
|
470 |
int save_caller_save_registers(); |
|
471 |
void restore_caller_save_registers(); |
|
472 |
||
473 |
void add_rc(Register dst, Register arg1, RegisterOrConstant arg2); |
|
474 |
||
475 |
// add_slow and mov_slow are used to manipulate offsets larger than 1024, |
|
476 |
// these functions are not expected to handle all possible constants, |
|
477 |
// only those that can really occur during compilation |
|
478 |
void add_slow(Register rd, Register rn, int c); |
|
479 |
void sub_slow(Register rd, Register rn, int c); |
|
480 |
||
481 |
||
52351 | 482 |
void mov_slow(Register rd, intptr_t c, AsmCondition cond = al); |
42664 | 483 |
void mov_slow(Register rd, const char *string); |
484 |
void mov_slow(Register rd, address addr); |
|
485 |
||
486 |
// Patchable oop move; currently identical to a plain mov_oop.
void patchable_mov_oop(Register rd, jobject o, int oop_index) {
  mov_oop(rd, o, oop_index);
}
void mov_oop(Register rd, jobject o, int index = 0, AsmCondition cond = al);

// Patchable metadata move; currently identical to a plain mov_metadata.
void patchable_mov_metadata(Register rd, Metadata* o, int index) {
  mov_metadata(rd, o, index);
}
void mov_metadata(Register rd, Metadata* o, int index = 0);
42664 | 495 |
|
52351 | 496 |
void mov_float(FloatRegister fd, jfloat c, AsmCondition cond = al); |
497 |
void mov_double(FloatRegister fd, jdouble c, AsmCondition cond = al); |
|
498 |
||
42664 | 499 |
|
500 |
// Note: this variant of mov_address assumes the address moves with
// the code. Do *not* implement it with non-relocated instructions,
// unless PC-relative.
void mov_relative_address(Register rd, address addr, AsmCondition cond = al) {
  // PC reads as the current instruction address + 8, hence the bias.
  int offset = addr - pc() - 8;
  assert((offset & 3) == 0, "bad alignment");
  if (offset >= 0) {
    // The offset must be encodable as an ARM rotated immediate.
    assert(AsmOperand::is_rotated_imm(offset), "addr too far");
    add(rd, PC, offset, cond);
  } else {
    assert(AsmOperand::is_rotated_imm(-offset), "addr too far");
    sub(rd, PC, -offset, cond);
  }
}
|
514 |
||
53686 | 515 |
// Runtime address that may vary from one execution to another.
// Warning: do not implement as a PC relative address.
void mov_address(Register rd, address addr) {
  mov_address(rd, addr, RelocationHolder::none);
}

// rspec can be RelocationHolder::none (for ignored symbolic Relocation).
// In that case, the address is absolute and the generated code need
// not be relocable.
void mov_address(Register rd, address addr, RelocationHolder const& rspec) {
  assert(rspec.type() != relocInfo::runtime_call_type, "do not use mov_address for runtime calls");
  assert(rspec.type() != relocInfo::static_call_type, "do not use mov_address for relocable calls");
  if (rspec.type() == relocInfo::none) {
    // absolute address, relocation not needed
    mov_slow(rd, (intptr_t)addr);
    return;
  }
  if (VM_Version::supports_movw()) {
    // Materialize the address with movw/movt, emitting the relocation first.
    relocate(rspec);
    int c = (int)addr;
    movw(rd, c & 0xffff);
    if ((unsigned int)c >> 16) {
      // movt is only needed when the upper halfword is non-zero.
      movt(rd, (unsigned int)c >> 16);
    }
    return;
  }
  // No movw support: emit the address as an inline literal, load it
  // PC-relatively, and branch over the literal word.
  Label skip_literal;
  InlinedAddress addr_literal(addr, rspec);
  ldr_literal(rd, addr_literal);
  b(skip_literal);
  bind_literal(addr_literal);
  bind(skip_literal);
}
|
548 |
||
549 |
// Note: Do not define mov_address for a Label
//
// Load from addresses potentially within the code are now handled
// InlinedLiteral subclasses (to allow more flexibility on how the
// ldr_literal is performed).

// PC-relative load of an inline address literal (PC reads as pc() + 8).
void ldr_literal(Register rd, InlinedAddress& L) {
  assert(L.rspec().type() != relocInfo::runtime_call_type, "avoid ldr_literal for calls");
  assert(L.rspec().type() != relocInfo::static_call_type, "avoid ldr_literal for calls");
  relocate(L.rspec());
  ldr(rd, Address(PC, target(L.label) - pc() - 8));
}

// Load the address of an inline string literal.
void ldr_literal(Register rd, InlinedString& L) {
  const char* msg = L.msg();
  if (code()->consts()->contains((address)msg)) {
    // string address moves with the code
    ldr(rd, Address(PC, ((address)msg) - pc() - 8));
    return;
  }
  // Warning: use external strings with care. They are not relocated
  // if the code moves. If needed, use code_string to move them
  // to the consts section.
  ldr(rd, Address(PC, target(L.label) - pc() - 8));
}

// Load an inline metadata literal.
void ldr_literal(Register rd, InlinedMetadata& L) {
  // relocation done in the bind_literal for metadatas
  ldr(rd, Address(PC, target(L.label) - pc() - 8));
}

// Emit the literal word for an InlinedAddress at the current position.
void bind_literal(InlinedAddress& L) {
  bind(L.label);
  assert(L.rspec().type() != relocInfo::metadata_type, "Must use InlinedMetadata");
  // We currently do not use oop 'bound' literals.
  // If the code evolves and the following assert is triggered,
  // we need to implement InlinedOop (see InlinedMetadata).
  assert(L.rspec().type() != relocInfo::oop_type, "Inlined oops not supported");
  // Note: relocation is handled by relocate calls in ldr_literal
  AbstractAssembler::emit_address((address)L.target());
}

// Emit the literal word for an InlinedString, unless the string already
// lives in the consts section (then ldr_literal addressed it directly).
void bind_literal(InlinedString& L) {
  const char* msg = L.msg();
  if (code()->consts()->contains((address)msg)) {
    // The Label should not be used; avoid binding it
    // to detect errors.
    return;
  }
  bind(L.label);
  AbstractAssembler::emit_address((address)L.msg());
}

// Emit the literal word for an InlinedMetadata, with its relocation.
void bind_literal(InlinedMetadata& L) {
  bind(L.label);
  relocate(metadata_Relocation::spec_for_immediate());
  AbstractAssembler::emit_address((address)L.data());
}
|
607 |
||
46961
c9094b1e5f87
8186088: ConstantPoolCache::_resolved_references is not a JNIHandle
coleenp
parents:
46369
diff
changeset
|
608 |
void resolve_oop_handle(Register result); |
42664 | 609 |
void load_mirror(Register mirror, Register method, Register tmp); |
610 |
||
// Helper macros declaring a "common" mnemonic that simply forwards to the
// corresponding 32-bit ARM instruction. ARM_INSTR_<n> takes the common name,
// the arm32 mnemonic it expands to, and the <n> argument types.
#define ARM_INSTR_1(common_mnemonic, arm32_mnemonic, arg_type) \
void common_mnemonic(arg_type arg) { \
  arm32_mnemonic(arg); \
}

#define ARM_INSTR_2(common_mnemonic, arm32_mnemonic, arg1_type, arg2_type) \
void common_mnemonic(arg1_type arg1, arg2_type arg2) { \
  arm32_mnemonic(arg1, arg2); \
}

#define ARM_INSTR_3(common_mnemonic, arm32_mnemonic, arg1_type, arg2_type, arg3_type) \
void common_mnemonic(arg1_type arg1, arg2_type arg2, arg3_type arg3) { \
  arm32_mnemonic(arg1, arg2, arg3); \
}

// Register-indirect control flow.
ARM_INSTR_1(jump, bx, Register)
ARM_INSTR_1(call, blx, Register)

ARM_INSTR_2(cbz_32, cbz, Register, Label&)
ARM_INSTR_2(cbnz_32, cbnz, Register, Label&)

// 32-bit loads/stores (signed and unsigned 32-bit loads are the same ldr).
ARM_INSTR_2(ldr_u32, ldr, Register, Address)
ARM_INSTR_2(ldr_s32, ldr, Register, Address)
ARM_INSTR_2(str_32, str, Register, Address)

// 32-bit two-operand ALU ops.
ARM_INSTR_2(mvn_32, mvn, Register, Register)
ARM_INSTR_2(cmp_32, cmp, Register, Register)
ARM_INSTR_2(neg_32, neg, Register, Register)
ARM_INSTR_2(clz_32, clz, Register, Register)
ARM_INSTR_2(rbit_32, rbit, Register, Register)

ARM_INSTR_2(cmp_32, cmp, Register, int)
ARM_INSTR_2(cmn_32, cmn, Register, int)

// 32-bit three-operand ALU ops, register / AsmOperand / immediate forms.
ARM_INSTR_3(add_32, add, Register, Register, Register)
ARM_INSTR_3(sub_32, sub, Register, Register, Register)
ARM_INSTR_3(subs_32, subs, Register, Register, Register)
ARM_INSTR_3(mul_32, mul, Register, Register, Register)
ARM_INSTR_3(and_32, andr, Register, Register, Register)
ARM_INSTR_3(orr_32, orr, Register, Register, Register)
ARM_INSTR_3(eor_32, eor, Register, Register, Register)

ARM_INSTR_3(add_32, add, Register, Register, AsmOperand)
ARM_INSTR_3(sub_32, sub, Register, Register, AsmOperand)
ARM_INSTR_3(orr_32, orr, Register, Register, AsmOperand)
ARM_INSTR_3(eor_32, eor, Register, Register, AsmOperand)
ARM_INSTR_3(and_32, andr, Register, Register, AsmOperand)

ARM_INSTR_3(add_32, add, Register, Register, int)
ARM_INSTR_3(adds_32, adds, Register, Register, int)
ARM_INSTR_3(sub_32, sub, Register, Register, int)
ARM_INSTR_3(subs_32, subs, Register, Register, int)

ARM_INSTR_2(tst_32, tst, Register, unsigned int)
ARM_INSTR_2(tst_32, tst, Register, AsmOperand)

ARM_INSTR_3(and_32, andr, Register, Register, uint)
ARM_INSTR_3(orr_32, orr, Register, Register, uint)
ARM_INSTR_3(eor_32, eor, Register, Register, uint)

// Single-precision VFP operations.
ARM_INSTR_1(cmp_zero_float, fcmpzs, FloatRegister)
ARM_INSTR_1(cmp_zero_double, fcmpzd, FloatRegister)

ARM_INSTR_2(ldr_float, flds, FloatRegister, Address)
ARM_INSTR_2(str_float, fsts, FloatRegister, Address)
ARM_INSTR_2(mov_float, fcpys, FloatRegister, FloatRegister)
ARM_INSTR_2(neg_float, fnegs, FloatRegister, FloatRegister)
ARM_INSTR_2(abs_float, fabss, FloatRegister, FloatRegister)
ARM_INSTR_2(sqrt_float, fsqrts, FloatRegister, FloatRegister)
ARM_INSTR_2(cmp_float, fcmps, FloatRegister, FloatRegister)

ARM_INSTR_3(add_float, fadds, FloatRegister, FloatRegister, FloatRegister)
ARM_INSTR_3(sub_float, fsubs, FloatRegister, FloatRegister, FloatRegister)
ARM_INSTR_3(mul_float, fmuls, FloatRegister, FloatRegister, FloatRegister)
ARM_INSTR_3(div_float, fdivs, FloatRegister, FloatRegister, FloatRegister)

// Double-precision VFP operations.
ARM_INSTR_2(ldr_double, fldd, FloatRegister, Address)
ARM_INSTR_2(str_double, fstd, FloatRegister, Address)
ARM_INSTR_2(mov_double, fcpyd, FloatRegister, FloatRegister)
ARM_INSTR_2(neg_double, fnegd, FloatRegister, FloatRegister)
ARM_INSTR_2(cmp_double, fcmpd, FloatRegister, FloatRegister)
ARM_INSTR_2(abs_double, fabsd, FloatRegister, FloatRegister)
ARM_INSTR_2(sqrt_double, fsqrtd, FloatRegister, FloatRegister)

ARM_INSTR_3(add_double, faddd, FloatRegister, FloatRegister, FloatRegister)
ARM_INSTR_3(sub_double, fsubd, FloatRegister, FloatRegister, FloatRegister)
ARM_INSTR_3(mul_double, fmuld, FloatRegister, FloatRegister, FloatRegister)
ARM_INSTR_3(div_double, fdivd, FloatRegister, FloatRegister, FloatRegister)

// Float <-> double conversions and single-precision FPR -> GPR move.
ARM_INSTR_2(convert_f2d, fcvtds, FloatRegister, FloatRegister)
ARM_INSTR_2(convert_d2f, fcvtsd, FloatRegister, FloatRegister)

ARM_INSTR_2(mov_fpr2gpr_float, fmrs, Register, FloatRegister)

#undef ARM_INSTR_1
#undef ARM_INSTR_2
#undef ARM_INSTR_3
|
// Branch to L if bit 'bit' of rt is zero. Synthesized from tst + b, so
// the branch target is not limited to the real tbz range.
void tbz(Register rt, int bit, Label& L) {
  assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
  tst(rt, 1 << bit);
  b(L, eq);
}

// Branch to L if bit 'bit' of rt is non-zero.
void tbnz(Register rt, int bit, Label& L) {
  assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
  tst(rt, 1 << bit);
  b(L, ne);
}

// Compare-and-branch-if-zero, synthesized from cmp + conditional branch.
void cbz(Register rt, Label& L) {
  cmp(rt, 0);
  b(L, eq);
}

void cbz(Register rt, address target) {
  cmp(rt, 0);
  b(target, eq);
}

// Compare-and-branch-if-non-zero.
void cbnz(Register rt, Label& L) {
  cmp(rt, 0);
  b(L, ne);
}

// Return by branching to the address in dst (LR by default).
void ret(Register dst = LR) {
  bx(dst);
}
|
// Materializes zero in tmp and returns it (ARM32 has no architectural
// zero register, so callers supply a scratch register).
Register zero_register(Register tmp) {
  mov(tmp, 0);
  return tmp;
}

// dst = src << shift (logical).
void logical_shift_left(Register dst, Register src, int shift) {
  mov(dst, AsmOperand(src, lsl, shift));
}

// 32-bit variant; identical to logical_shift_left on 32-bit ARM.
void logical_shift_left_32(Register dst, Register src, int shift) {
  mov(dst, AsmOperand(src, lsl, shift));
}

// dst = src >> shift (logical, zero-filling).
void logical_shift_right(Register dst, Register src, int shift) {
  mov(dst, AsmOperand(src, lsr, shift));
}

// dst = src >> shift (arithmetic, sign-filling).
void arith_shift_right(Register dst, Register src, int shift) {
  mov(dst, AsmOperand(src, asr, shift));
}

// 32-bit arithmetic shift right; identical to arith_shift_right here.
void asr_32(Register dst, Register src, int shift) {
  mov(dst, AsmOperand(src, asr, shift));
}

// If <cond> holds, compares r1 and r2. Otherwise, flags are set so that <cond> does not hold.
void cond_cmp(Register r1, Register r2, AsmCondition cond) {
  cmp(r1, r2, cond);
}

// If <cond> holds, compares r and imm. Otherwise, flags are set so that <cond> does not hold.
void cond_cmp(Register r, int imm, AsmCondition cond) {
  cmp(r, imm, cond);
}

// dst = src rounded down to a multiple of 'align' (must be a power of two);
// implemented by clearing the low bits with bic.
void align_reg(Register dst, Register src, int align) {
  assert (is_power_of_2(align), "should be");
  bic(dst, src, align-1);
}
|
// Hint that 'addr' will be read soon (pld prefetch).
void prefetch_read(Address addr) {
  pld(addr);
}

// Raw multi-register push/pop. Registers must be passed in ascending
// encoding order (asserted below).
void raw_push(Register r1, Register r2) {
  assert(r1->encoding() < r2->encoding(), "should be ordered");
  push(RegisterSet(r1) | RegisterSet(r2));
}

void raw_pop(Register r1, Register r2) {
  assert(r1->encoding() < r2->encoding(), "should be ordered");
  pop(RegisterSet(r1) | RegisterSet(r2));
}

void raw_push(Register r1, Register r2, Register r3) {
  assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
  push(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
}

void raw_pop(Register r1, Register r2, Register r3) {
  assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
  pop(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
}

// Restores registers r1 and r2 previously saved by raw_push(r1, r2, ret_addr) and returns by ret_addr. Clobbers LR.
// Implemented by popping the saved return address straight into PC.
void raw_pop_and_ret(Register r1, Register r2) {
  raw_pop(r1, r2, PC);
}
|
// Jump to the address stored at 'addr'. 'scratch' is unused on ARM32
// since PC can be the destination of a load directly.
void indirect_jump(Address addr, Register scratch) {
  ldr(PC, addr);
}

// Jump through an inlined address literal; 'scratch' unused (see above).
void indirect_jump(InlinedAddress& literal, Register scratch) {
  ldr_literal(PC, literal);
}

// dst = -src, via reverse-subtract from zero.
void neg(Register dst, Register src) {
  rsb(dst, src, 0);
}

// Branch to L if r (as a 32-bit value) is negative.
void branch_if_negative_32(Register r, Label& L) {
  // TODO: This function and branch_if_any_negative_32 could possibly
  // be revised after the aarch64 removal.
  // tbnz is not used instead of tst & b.mi because destination may be out of tbnz range (+-32KB)
  // since these methods are used in LIR_Assembler::emit_arraycopy() to jump to stub entry.
  tst_32(r, r);
  b(L, mi);
}

// Branch to L if r1 or r2 is negative. The OR of the values is negative
// iff either sign bit is set; tmp is clobbered with r1 | r2.
void branch_if_any_negative_32(Register r1, Register r2, Register tmp, Label& L) {
  orrs(tmp, r1, r2);
  b(L, mi);
}

// Branch to L if any of r1, r2, r3 is negative; tmp is clobbered.
void branch_if_any_negative_32(Register r1, Register r2, Register r3, Register tmp, Label& L) {
  orr_32(tmp, r1, r2);
  orrs(tmp, tmp, r3);
  b(L, mi);
}

// dst = r1 + (r2 << shift) -- scaled-index address arithmetic.
void add_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
  add(dst, r1, AsmOperand(r2, lsl, shift));
}

// dst = r1 - (r2 << shift).
void sub_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
  sub(dst, r1, AsmOperand(r2, lsl, shift));
}
|
// C 'boolean' to Java boolean: x == 0 ? 0 : 1
void c2bool(Register x);

// klass oop manipulations if compressed

void load_klass(Register dst_klass, Register src_oop, AsmCondition cond = al);

void store_klass(Register src_klass, Register dst_oop);


// oop manipulations

// GC-aware heap oop loads/stores; 'decorators' select barrier behavior,
// tmp1..tmp3 are scratch registers for the barrier code.
void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
void store_heap_oop(Address obj, Register new_val, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
void store_heap_oop_null(Address obj, Register new_val, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

// Generic decorated accesses for an arbitrary BasicType.
void access_load_at(BasicType type, DecoratorSet decorators, Address src, Register dst, Register tmp1, Register tmp2, Register tmp3);
void access_store_at(BasicType type, DecoratorSet decorators, Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null);

// Resolves obj for access. Result is placed in the same register.
// All other registers are preserved.
void resolve(DecoratorSet decorators, Register obj);

// Loads from a global variable at a fixed address (pointer / int32 / byte).
void ldr_global_ptr(Register reg, address address_of_global);
void ldr_global_s32(Register reg, address address_of_global);
void ldrb_global(Register reg, address address_of_global);

// address_placeholder_instruction is invalid instruction and is used
// as placeholder in code for address of label
enum { address_placeholder_instruction = 0xFFFFFFFF };
|
// Emits a word-aligned placeholder word that will later be patched with
// the address of label L; L must still be unbound so the relocation
// created by target(L) can patch it.
void emit_address(Label& L) {
  assert(!L.is_bound(), "otherwise address will not be patched");
  target(L); // creates relocation which will be patched later

  assert ((offset() & (wordSize-1)) == 0, "should be aligned by word size");

  AbstractAssembler::emit_address((address)address_placeholder_instruction);
}
||
894 |
void b(address target, AsmCondition cond = al) { |
|
895 |
Assembler::b(target, cond); \ |
|
896 |
} |
|
897 |
void b(Label& L, AsmCondition cond = al) { |
|
898 |
// internal jumps |
|
899 |
Assembler::b(target(L), cond); |
|
900 |
} |
|
// Branch-with-link (call) to an absolute address.
void bl(address target, AsmCondition cond = al) {
  Assembler::bl(target, cond);
}
// Branch-with-link to a label.
void bl(Label& L, AsmCondition cond = al) {
  // internal calls
  Assembler::bl(target(L), cond);
}
// Computes the address of label L into dest, PC-relative. The -8
// compensates for the ARM convention that reading PC yields the address
// of the current instruction + 8.
void adr(Register dest, Label& L, AsmCondition cond = al) {
  int delta = target(L) - pc() - 8;
  if (delta >= 0) {
    add(dest, PC, delta, cond);
  } else {
    sub(dest, PC, -delta, cond);
  }
}
|
// Variable-length jump and calls. We now distinguish only the
// patchable case from the other cases. Patchable must be
// distinguished from relocable. Relocable means the generated code
// containing the jump/call may move. Patchable means that the
// targeted address may be changed later.

// Non patchable versions.
// - used only for relocInfo::runtime_call_type and relocInfo::none
// - may use relative or absolute format (do not use relocInfo::none
//   if the generated code may move)
// - the implementation takes into account switch to THUMB mode if the
//   destination is a THUMB address
// - the implementation supports far targets
//
// To reduce regression risk, scratch still defaults to noreg on
// arm32. This results in patchable instructions. However, if
// patching really matters, the call sites should be modified and
// use patchable_call or patchable_jump. If patching is not required
// and if a register can be clobbered, it should be explicitly
// specified to allow future optimizations.
void jump(address target,
          relocInfo::relocType rtype = relocInfo::runtime_call_type,
          Register scratch = noreg, AsmCondition cond = al);

void call(address target,
          RelocationHolder rspec, AsmCondition cond = al);

// Convenience overload: wraps 'rtype' in a simple RelocationHolder.
void call(address target,
          relocInfo::relocType rtype = relocInfo::runtime_call_type,
          AsmCondition cond = al) {
  call(target, Relocation::spec_simple(rtype), cond);
}
951 |
||
952 |
void jump(AddressLiteral dest) { |
|
953 |
jump(dest.target(), dest.reloc()); |
|
954 |
} |
|
955 |
void jump(address dest, relocInfo::relocType rtype, AsmCondition cond) { |
|
956 |
jump(dest, rtype, Rtemp, cond); |
|
957 |
} |
|
958 |
||
959 |
void call(AddressLiteral dest) { |
|
960 |
call(dest.target(), dest.reloc()); |
|
961 |
} |
|
// Patchable version:
// - set_destination can be used to atomically change the target
//
// The targets for patchable_jump and patchable_call must be in the
// code cache.
// [ including possible extensions of the code cache, like AOT code ]
//
// To reduce regression risk, scratch still defaults to noreg on
// arm32. If a register can be clobbered, it should be explicitly
// specified to allow future optimizations.
void patchable_jump(address target,
                    relocInfo::relocType rtype = relocInfo::runtime_call_type,
                    Register scratch = noreg, AsmCondition cond = al
                    );

// patchable_call may scratch Rtemp
int patchable_call(address target,
                   RelocationHolder const& rspec,
                   bool c2 = false);

// Convenience overload: wraps 'rtype' in a simple RelocationHolder.
int patchable_call(address target,
                   relocInfo::relocType rtype,
                   bool c2 = false) {
  return patchable_call(target, Relocation::spec_simple(rtype), c2);
}


// Reachability of a target from code emitted into the code cache
// (static helpers plus per-assembler wrappers).
static bool _reachable_from_cache(address target);
static bool _cache_fully_reachable();
bool cache_fully_reachable();
bool reachable_from_cache(address target);
|
// Zero-/sign-extends the low 'bits' bits of rn into rd.
void zero_extend(Register rd, Register rn, int bits);
void sign_extend(Register rd, Register rn, int bits);

// No-op on 32-bit ARM: registers have no high non-significant half.
inline void zap_high_non_significant_bits(Register r) {
}
|
// Object equality compare, abstracted through the BarrierSetAssembler.
void cmpoop(Register obj1, Register obj2);

// 64-bit values live in lo/hi register pairs on ARM32; helpers to move
// and shift such pairs.
void long_move(Register rd_lo, Register rd_hi,
               Register rn_lo, Register rn_hi,
               AsmCondition cond = al);
void long_shift(Register rd_lo, Register rd_hi,
                Register rn_lo, Register rn_hi,
                AsmShift shift, Register count);
void long_shift(Register rd_lo, Register rd_hi,
                Register rn_lo, Register rn_hi,
                AsmShift shift, int count);

// Atomic compare-and-swap at [base + offset]; 32-bit and 64-bit variants.
void atomic_cas(Register tmpreg1, Register tmpreg2, Register oldval, Register newval, Register base, int offset);
void atomic_cas_bool(Register oldval, Register newval, Register base, int offset, Register tmpreg);
void atomic_cas64(Register temp_lo, Register temp_hi, Register temp_result, Register oldval_lo, Register oldval_hi, Register newval_lo, Register newval_hi, Register base, int offset);

// CAS variants used for monitor lock acquire/release; branch to slow_case
// on failure unless allow_fallthrough_on_failure is set.
void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
|
#ifndef PRODUCT
// Preserves flags and all registers.
// On SMP the updated value might not be visible to external observers without a synchronization barrier
void cond_atomic_inc32(AsmCondition cond, int* counter_addr);
#endif // !PRODUCT

// unconditional non-atomic increment
void inc_counter(address counter_addr, Register tmpreg1, Register tmpreg2);
void inc_counter(int* counter_addr, Register tmpreg1, Register tmpreg2) {
  inc_counter((address) counter_addr, tmpreg1, tmpreg2);
}

// Patches the instruction at 'branch' to target 'target'; file/line
// identify the patch site for diagnostics.
void pd_patch_instruction(address branch, address target, const char* file, int line);
42664 | 1033 |
// Loading and storing values by size and signed-ness;
// size must not exceed wordSize (i.e. 8-byte values are not supported on 32-bit ARM);
// each of these calls generates exactly one load or store instruction,
// so src can be pre- or post-indexed address.
// 32-bit ARM variants also support conditional execution
void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, AsmCondition cond = al);
void store_sized_value(Register src, Address dst, size_t size_in_bytes, AsmCondition cond = al);

// Looks up itable_index of intf_klass in recv_klass's itable; on a miss
// branches to L_no_such_interface, otherwise method_result holds the
// resolved method. temp_reg1/temp_reg2 are scratch.
void lookup_interface_method(Register recv_klass,
                             Register intf_klass,
                             RegisterOrConstant itable_index,
                             Register method_result,
                             Register temp_reg1,
                             Register temp_reg2,
                             Label& L_no_such_interface);

// Compare char[] arrays aligned to 4 bytes.
void char_arrays_equals(Register ary1, Register ary2,
                        Register limit, Register result,
                        Register chr1, Register chr2, Label& Ldone);


// Materializes a float comparison result into dst (definition in .cpp).
void floating_cmp(Register dst);
|
// improved x86 portability (minimizing source code changes)

// PC-relative literal load; records the literal's relocation first.
// The -8 compensates for the ARM convention that reading PC yields the
// address of the current instruction + 8.
void ldr_literal(Register rd, AddressLiteral addr) {
  relocate(addr.rspec());
  ldr(rd, Address(PC, addr.target() - pc() - 8));
}

// Load the literal's address (not its contents) into Rd.
void lea(Register Rd, AddressLiteral addr) {
  // Never dereferenced, as on x86 (lval status ignored)
  mov_address(Rd, addr.target(), addr.rspec());
}
|
// Restores the VM's default floating-point mode (defined in the .cpp;
// presumably resets FP control state after native code -- confirm there).
void restore_default_fp_mode();

#ifdef COMPILER2
// C2 fast-path monitor enter/exit (see .cpp for the locking protocol).
void fast_lock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3 = noreg);
void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
#endif
|
1076 |
||
1077 |
||
1078 |
}; |
|
1079 |
||
1080 |
||
1081 |
// The purpose of this class is to build several code fragments of the same size |
|
1082 |
// in order to allow fast table branch. |
|
1083 |
||
49364
601146c66cad
8173070: Remove ValueObj class for allocation subclassing for runtime code
coleenp
parents:
49010
diff
changeset
|
1084 |
class FixedSizeCodeBlock { |
42664 | 1085 |
public: |
1086 |
FixedSizeCodeBlock(MacroAssembler* masm, int size_in_instrs, bool enabled); |
|
1087 |
~FixedSizeCodeBlock(); |
|
1088 |
||
1089 |
private: |
|
1090 |
MacroAssembler* _masm; |
|
1091 |
address _start; |
|
1092 |
int _size_in_instrs; |
|
1093 |
bool _enabled; |
|
1094 |
}; |
|
1095 |
||
1096 |
||
53244
9807daeb47c4
8216167: Update include guards to reflect correct directories
coleenp
parents:
53061
diff
changeset
|
1097 |
#endif // CPU_ARM_MACROASSEMBLER_ARM_HPP |