/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <sys/types.h>

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "interpreter/interpreter.hpp"

#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"

// #include "gc_interface/collectedHeap.inline.hpp"
// #include "interpreter/interpreter.hpp"
// #include "memory/cardTableModRefBS.hpp"
// #include "prims/methodHandles.hpp"
// #include "runtime/biasedLocking.hpp"
// #include "runtime/interfaceSupport.hpp"
// #include "runtime/objectMonitor.hpp"
// #include "runtime/os.hpp"
// #include "runtime/sharedRuntime.hpp"
// #include "runtime/stubRoutines.hpp"

#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
  int instructions = 1;
  assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant");
  long offset = (target - branch) >> 2;
  unsigned insn = *(unsigned*)branch;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
    // Load register (literal)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    Instruction_aarch64::spatch(branch, 25, 0, offset);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    Instruction_aarch64::spatch(branch, 23, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    Instruction_aarch64::spatch(branch, 18, 5, offset);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = target-branch;
    int shift = Instruction_aarch64::extract(insn, 31, 31);
    if (shift) {
      u_int64_t dest = (u_int64_t)target;
      uint64_t pc_page = (uint64_t)branch >> 12;
      uint64_t adr_page = (uint64_t)target >> 12;
      unsigned offset_lo = dest & 0xfff;
      offset = adr_page - pc_page;

      // We handle 3 types of PC relative addressing
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      // In the first 2 cases we must check that Rx is the same in the adrp and the
      // subsequent ldr/str or add instruction. Otherwise we could accidentally end
      // up treating a type 3 relocation as a type 1 or 2 just because it happened
      // to be followed by a random unrelated ldr/str or add instruction.
      //
      // In the case of a type 3 relocation, we know that these are only generated
      // for the safepoint polling page, or for the card type byte map base so we
      // assert as much and of course that the offset is 0.
      //
      unsigned insn2 = ((unsigned*)branch)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned size = Instruction_aarch64::extract(insn2, 31, 30);
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo >> size);
        guarantee(((dest >> size) << size) == dest, "misaligned target");
        instructions = 2;
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        Instruction_aarch64::patch(branch + sizeof (unsigned),
                                   21, 10, offset_lo);
        instructions = 2;
      } else {
        assert((jbyte *)target ==
                 ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
               target == StubRoutines::crc_table_addr() ||
               (address)target == os::get_polling_page(),
               "adrp must be polling page or byte map base");
        assert(offset_lo == 0, "offset must be 0 for polling page or byte map base");
      }
    }
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(branch, 23, 5, offset);
    Instruction_aarch64::patch(branch, 30, 29, offset_lo);
  } else if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010100) {
    u_int64_t dest = (u_int64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(branch+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(branch+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(branch, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(branch+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(branch+8, 20, 5, (dest >>= 16) & 0xffff);
    assert(target_addr_for_insn(branch) == target, "should be");
    instructions = 3;
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    // nothing to do
    assert(target == 0, "did not expect to relocate target for polling page load");
  } else {
    ShouldNotReachHere();
  }
  return instructions * NativeInstruction::instruction_size;
}

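// Note (informal, inferred from the patch code above): movptr() materializes
// a 48-bit constant as
//   movz reg, #bits[15:0]
//   movk reg, #bits[31:16], lsl #16
//   movk reg, #bits[47:32], lsl #32
// so the "Move wide constant" case rewrites bits 20:5 of exactly three
// instructions, and patch_oop() below does the same for wide OOPs.  This is
// also why pd_patch_instruction_size() asserts a 48-bit target address.
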
int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    narrowOop n = oopDesc::encode_heap_oop((oop)o);
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
  long offset = 0;
  if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
    // Load register (literal)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
    return address(((uint64_t)insn_addr + (offset << 2)));
  } else if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 25, 0);
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 23, 5);
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    offset = Instruction_aarch64::sextract(insn, 18, 5);
  } else if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // PC-rel. addressing
    offset = Instruction_aarch64::extract(insn, 30, 29);
    offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
    int shift = Instruction_aarch64::extract(insn, 31, 31) ? 12 : 0;
    if (shift) {
      offset <<= shift;
      uint64_t target_page = ((uint64_t)insn_addr) + offset;
      target_page &= ((uint64_t)-1) << shift;
      // Return the target address for the following sequences
      //   1 - adrp    Rx, target_page
      //       ldr/str Ry, [Rx, #offset_in_page]
      //   2 - adrp    Rx, target_page
      //       add     Ry, Rx, #offset_in_page
      //   3 - adrp    Rx, target_page (page aligned reloc, offset == 0)
      //
      // In the first two cases we check that the register is the same and
      // return the target_page + the offset within the page.
      // Otherwise we assume it is a page aligned relocation and return
      // the target page only. The only cases this is generated is for
      // the safepoint polling page or for the card table byte map base so
      // we assert as much.
      //
      unsigned insn2 = ((unsigned*)insn_addr)[1];
      if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
          Instruction_aarch64::extract(insn, 4, 0) ==
          Instruction_aarch64::extract(insn2, 9, 5)) {
        // Load/store register (unsigned immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        unsigned int size = Instruction_aarch64::extract(insn2, 31, 30);
        return address(target_page + (byte_offset << size));
      } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                 Instruction_aarch64::extract(insn, 4, 0) ==
                 Instruction_aarch64::extract(insn2, 4, 0)) {
        // add (immediate)
        unsigned int byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
        return address(target_page + byte_offset);
      } else {
        assert((jbyte *)target_page ==
                 ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base ||
               (address)target_page == os::get_polling_page(),
               "adrp must be polling page or byte map base");
        return (address)target_page;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
    u_int32_t *insns = (u_int32_t *)insn_addr;
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                   + (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                   + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
  } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
             Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
    return 0;
  } else {
    ShouldNotReachHere();
  }
  return address(((uint64_t)insn_addr + (offset << 2)));
}

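// Note (informal): the implementation below is a single full barrier.
// DSB SY waits until every earlier load and store is visible to all
// observers before anything after it executes (the strongest ordering
// AArch64 offers), so one barrier is all this port emits for memory
// serialization.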
void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  dsb(Assembler::SY);
}


void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  if (clear_pc) {
    str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
  }
}

// Calls to C land
//
// When entering C land, the rfp, & resp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  if (last_java_pc != NULL) {
    adr(scratch, last_java_pc);
  } else {
    // FIXME: This is almost never correct. We should delete all
    // cases of set_last_Java_frame with last_java_pc=NULL and use the
    // correct return address instead.
    adr(scratch, pc());
  }

  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, (address)NULL, scratch);
  }
}

void MacroAssembler::far_call(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb.
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    blr(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    bl(entry);
  }
}

void MacroAssembler::far_jump(Address entry, CodeBuffer *cbuf, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != NULL,
         "destination of far call not found in code cache");
  if (far_branches()) {
    unsigned long offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb.
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    if (cbuf) cbuf->set_insts_mark();
    br(tmp);
  } else {
    if (cbuf) cbuf->set_insts_mark();
    b(entry);
  }
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  bool need_tmp_reg = false;
  if (tmp_reg == noreg) {
    tmp_reg = rscratch2;
  }
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, rscratch1);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    ldr(swap_reg, mark_addr);
  }
  andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::NE, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  load_prototype_header(tmp_reg, obj_reg);
  orr(tmp_reg, tmp_reg, rthread);
  eor(tmp_reg, swap_reg, tmp_reg);
  andr(tmp_reg, tmp_reg, ~((int) markOopDesc::age_mask_in_place));
  if (counters != NULL) {
    Label around;
    cbnz(tmp_reg, around);
    atomic_incw(Address((address)counters->biased_lock_entry_count_addr()), tmp_reg, rscratch1);
    b(done);
    bind(around);
  } else {
    cbz(tmp_reg, done);
  }

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  andr(rscratch1, tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cbnz(rscratch1, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  andr(rscratch1, tmp_reg, markOopDesc::epoch_mask_in_place);
  cbnz(rscratch1, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  {
    Label here;
    mov(rscratch1, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
    andr(swap_reg, swap_reg, rscratch1);
    orr(tmp_reg, swap_reg, rthread);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, this means that
    // another thread succeeded in biasing it toward itself and we
    // need to revoke that bias. The revocation will occur in the
    // interpreter runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->anonymously_biased_lock_entry_count_addr()),
                  tmp_reg, rscratch1);
    }
  }
  b(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here;
    load_prototype_header(tmp_reg, obj_reg);
    orr(tmp_reg, rthread, tmp_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, slow_case);
    // If the biasing toward our thread failed, then another thread
    // succeeded in biasing it toward itself and we need to revoke that
    // bias. The revocation will occur in the runtime in the slow case.
    bind(here);
    if (counters != NULL) {
      atomic_incw(Address((address)counters->rebiased_lock_entry_count_addr()),
                  tmp_reg, rscratch1);
    }
  }
  b(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  {
    Label here, nope;
    load_prototype_header(tmp_reg, obj_reg);
    cmpxchgptr(swap_reg, tmp_reg, obj_reg, rscratch1, here, &nope);
    bind(here);

    // Fall through to the normal CAS-based lock, because no matter what
    // the result of the above CAS, some thread must have succeeded in
    // removing the bias bit from the object's header.
    if (counters != NULL) {
      atomic_incw(Address((address)counters->revoked_lock_entry_count_addr()), tmp_reg,
                  rscratch1);
    }
    bind(nope);
  }

  bind(cas_label);

  return null_check_offset;
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmp(temp_reg, markOopDesc::biased_lock_pattern);
  br(Assembler::EQ, done);
}


// added to make this compile

REGISTER_DEFINITION(Register, noreg);

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  set_last_Java_frame(last_java_sp, rfp, l, rscratch1);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.

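// Note (informal): a direct AArch64 b/bl instruction encodes a signed 26-bit
// word offset, giving a reach of roughly +/-128 MB.  When the whole code
// cache fits inside that range, far_branches() is false and trampoline_call()
// below emits a plain bl; otherwise the trampoline stub emitted by
// emit_trampoline_stub() supplies a full-range indirect branch.  The
// +/-128 MB figure is an architectural property, not something asserted in
// this file.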
void MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  unsigned int start_offset = offset();
  if (far_branches() && !Compile::current()->in_scratch_emit_size()) {
    emit_trampoline_stub(offset(), entry.target());
  }

  if (cbuf) cbuf->set_insts_mark();
  relocate(entry.rspec());
  if (Assembler::reachable_from_branch_at(pc(), entry.target())) {
    bl(entry.target());
  } else {
    bl(pc());
  }
}


// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

void MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                          address dest) {
  address stub = start_a_stub(Compile::MAX_stubs_size/2);
  if (stub == NULL) {
    start_a_stub(Compile::MAX_stubs_size/2);
    Compile::current()->env()->record_out_of_memory_failure();
    return;
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call
  // - call
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
}

void MacroAssembler::ic_call(address entry) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc());
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // unsigned long offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
  trampoline_call(Address(entry, rh));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(arg_1 != c_rarg3, "smashed arg");
  assert(arg_2 != c_rarg3, "smashed arg");
  pass_arg3(this, arg_3);
  assert(arg_1 != c_rarg2, "smashed arg");
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }


RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0)
    return RegisterOrConstant(value + offset);

  // load indirectly to solve generation ordering problem
  ldr(tmp, ExternalAddress((address) delayed_value_addr));

  if (offset != 0)
    add(tmp, tmp, offset);

  return RegisterOrConstant(tmp);
}


void MacroAssembler::notify(int type) {
  if (type == bytecode_start) {
    // set_last_Java_frame(esp, rfp, (address)NULL);
    Assembler::notify(type);
    // reset_last_Java_frame(true, false);
  }
  else
    Assembler::notify(type);
}

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface) {
  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size() * wordSize;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);
  if (HeapWordsPerLong > 1) {
    // Round up to align_object_offset boundary
    // see code for instanceKlass::start_of_itable!
    round_to(scan_temp, BytesPerLong);
  }

  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
  lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
  if (itentry_off)
    add(recv_klass, recv_klass, itentry_off);

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
    cmp(intf_klass, method_result);

    if (peel) {
      br(Assembler::EQ, found_method);
    } else {
      br(Assembler::NE, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    // Check that the previous entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    cbz(method_result, L_no_such_interface);
    add(scan_temp, scan_temp, scan_step);
  }

  bind(found_method);

  // Got a hit.
  ldr(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
  ldr(method_result, Address(recv_klass, scan_temp));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  assert(vtableEntry::size() * wordSize == 8,
         "adjust the scaling in the code below");
  int vtable_offset_in_bytes = base + vtableEntry::method_offset_in_bytes();

  if (vtable_index.is_register()) {
    lea(method_result, Address(recv_klass,
                               vtable_index.as_register(),
                               Address::lsl(LogBytesPerWord)));
    ldr(method_result, Address(method_result, vtable_offset_in_bytes));
  } else {
    vtable_offset_in_bytes += vtable_index.as_constant() * wordSize;
    ldr(method_result, Address(recv_klass, vtable_offset_in_bytes));
  }
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}


void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label) \
  if (&(label) == &L_fallthrough) { /*do nothing*/ } \
  else b(label) /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    br(Assembler::EQ, *L_success);
    cmp(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_slow_path);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      br(Assembler::EQ, *L_success);
    } else {
      br(Assembler::NE, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
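// Note (informal): a rough C sketch of repne_scan() below, for illustration:
//   while (count != 0) {
//     scratch = *(uintptr_t*)addr;  addr += wordSize;
//     if (value == scratch) break;   // exits with flags set to EQ
//     count--;
//   }
// On exit the condition flags still hold the result of the last cmp, so
// check_klass_subtype_slow_path() first forces the flags to NE with
// cmp(sp, zr) and then branches on NE ("not found") after the scan.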
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
|
|
1097 |
Register scratch) {
|
|
1098 |
Label Lloop, Lexit;
|
|
1099 |
cbz(count, Lexit);
|
|
1100 |
bind(Lloop);
|
|
1101 |
ldr(scratch, post(addr, wordSize));
|
|
1102 |
cmp(value, scratch);
|
|
1103 |
br(EQ, Lexit);
|
|
1104 |
sub(count, count, 1);
|
|
1105 |
cbnz(count, Lloop);
|
|
1106 |
bind(Lexit);
|
|
1107 |
}
|
|
1108 |
|
|
1109 |
// scans count 4 byte words at [addr] for occurence of value,
|
|
1110 |
// generic
|
|
1111 |
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
|
|
1112 |
Register scratch) {
|
|
1113 |
Label Lloop, Lexit;
|
|
1114 |
cbz(count, Lexit);
|
|
1115 |
bind(Lloop);
|
|
1116 |
ldrw(scratch, post(addr, wordSize));
|
|
1117 |
cmpw(value, scratch);
|
|
1118 |
br(EQ, Lexit);
|
|
1119 |
sub(count, count, 1);
|
|
1120 |
cbnz(count, Lloop);
|
|
1121 |
bind(Lexit);
|
|
1122 |
}
|
|
1123 |
|
|
1124 |
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
|
|
1125 |
Register super_klass,
|
|
1126 |
Register temp_reg,
|
|
1127 |
Register temp2_reg,
|
|
1128 |
Label* L_success,
|
|
1129 |
Label* L_failure,
|
|
1130 |
bool set_cond_codes) {
|
|
1131 |
assert_different_registers(sub_klass, super_klass, temp_reg);
|
|
1132 |
if (temp2_reg != noreg)
|
|
1133 |
assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
|
|
1134 |
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
|
|
1135 |
|
|
1136 |
Label L_fallthrough;
|
|
1137 |
int label_nulls = 0;
|
|
1138 |
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
|
|
1139 |
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
|
|
1140 |
assert(label_nulls <= 1, "at most one NULL in the batch");
|
|
1141 |
|
|
1142 |
// a couple of useful fields in sub_klass:
|
|
1143 |
int ss_offset = in_bytes(Klass::secondary_supers_offset());
|
|
1144 |
int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
|
|
1145 |
Address secondary_supers_addr(sub_klass, ss_offset);
|
|
1146 |
Address super_cache_addr( sub_klass, sc_offset);
|
|
1147 |
|
|
1148 |
BLOCK_COMMENT("check_klass_subtype_slow_path");
|
|
1149 |
|
|
1150 |
// Do a linear scan of the secondary super-klass chain.
|
|
1151 |
// This code is rarely used, so simplicity is a virtue here.
|
|
1152 |
// The repne_scan instruction uses fixed registers, which we must spill.
|
|
1153 |
// Don't worry too much about pre-existing connections with the input regs.
|
|
1154 |
|
|
1155 |
assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
|
|
1156 |
assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)
|
|
1157 |
|
|
1158 |
// Get super_klass value into r0 (even if it was in r5 or r2).
|
|
1159 |
RegSet pushed_registers;
|
|
1160 |
if (!IS_A_TEMP(r2)) pushed_registers += r2;
|
|
1161 |
if (!IS_A_TEMP(r5)) pushed_registers += r5;
|
|
1162 |
|
|
1163 |
if (super_klass != r0 || UseCompressedOops) {
|
|
1164 |
if (!IS_A_TEMP(r0)) pushed_registers += r0;
|
|
1165 |
}
|
|
1166 |
|
|
1167 |
push(pushed_registers, sp);
|
|
1168 |
|
|
1169 |
#ifndef PRODUCT
|
|
1170 |
mov(rscratch2, (address)&SharedRuntime::_partial_subtype_ctr);
|
|
1171 |
Address pst_counter_addr(rscratch2);
|
|
1172 |
ldr(rscratch1, pst_counter_addr);
|
|
1173 |
add(rscratch1, rscratch1, 1);
|
|
1174 |
str(rscratch1, pst_counter_addr);
|
|
1175 |
#endif //PRODUCT
|
|
1176 |
|
|
1177 |
// We will consult the secondary-super array.
|
|
1178 |
ldr(r5, secondary_supers_addr);
|
|
1179 |
// Load the array length.
|
|
1180 |
ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
|
|
1181 |
// Skip to start of data.
|
|
1182 |
add(r5, r5, Array<Klass*>::base_offset_in_bytes());
|
|
1183 |
|
|
1184 |
cmp(sp, zr); // Clear Z flag; SP is never zero
|
|
1185 |
// Scan R2 words at [R5] for an occurrence of R0.
|
|
1186 |
// Set NZ/Z based on last compare.
|
|
1187 |
repne_scan(r5, r0, r2, rscratch1);
|
|
1188 |
|
|
1189 |
// Unspill the temp. registers:
|
|
1190 |
pop(pushed_registers, sp);
|
|
1191 |
|
|
1192 |
br(Assembler::NE, *L_failure);
|
|
1193 |
|
|
1194 |
// Success. Cache the super we found and proceed in triumph.
|
|
1195 |
str(super_klass, super_cache_addr);
|
|
1196 |
|
|
1197 |
if (L_success != &L_fallthrough) {
|
|
1198 |
b(*L_success);
|
|
1199 |
}
|
|
1200 |
|
|
1201 |
#undef IS_A_TEMP
|
|
1202 |
|
|
1203 |
bind(L_fallthrough);
|
|
1204 |
}
|
|
1205 |
|
|
1206 |
|
|
1207 |
void MacroAssembler::verify_oop(Register reg, const char* s) {
|
|
1208 |
if (!VerifyOops) return;
|
|
1209 |
|
|
1210 |
// Pass register number to verify_oop_subroutine
|
|
1211 |
const char* b = NULL;
|
|
1212 |
{
|
|
1213 |
ResourceMark rm;
|
|
1214 |
stringStream ss;
|
|
1215 |
ss.print("verify_oop: %s: %s", reg->name(), s);
|
|
1216 |
b = code_string(ss.as_string());
|
|
1217 |
}
|
|
1218 |
BLOCK_COMMENT("verify_oop {");
|
|
1219 |
|
|
1220 |
stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
|
|
1221 |
stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
|
|
1222 |
|
|
1223 |
mov(r0, reg);
|
|
1224 |
mov(rscratch1, (address)b);
|
|
1225 |
|
|
1226 |
// call indirectly to solve generation ordering problem
|
|
1227 |
lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
|
|
1228 |
ldr(rscratch2, Address(rscratch2));
|
|
1229 |
blr(rscratch2);
|
|
1230 |
|
|
1231 |
ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
|
|
1232 |
ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
|
|
1233 |
|
|
1234 |
BLOCK_COMMENT("} verify_oop");
|
|
1235 |
}
|
|
1236 |
|
|
1237 |
void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
|
|
1238 |
if (!VerifyOops) return;
|
|
1239 |
|
|
1240 |
const char* b = NULL;
|
|
1241 |
{
|
|
1242 |
ResourceMark rm;
|
|
1243 |
stringStream ss;
|
|
1244 |
ss.print("verify_oop_addr: %s", s);
|
|
1245 |
b = code_string(ss.as_string());
|
|
1246 |
}
|
|
1247 |
BLOCK_COMMENT("verify_oop_addr {");
|
|
1248 |
|
|
1249 |
stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
|
|
1250 |
stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
|
|
1251 |
|
|
1252 |
// addr may contain sp so we will have to adjust it based on the
|
|
1253 |
// pushes that we just did.
|
|
1254 |
if (addr.uses(sp)) {
|
|
1255 |
lea(r0, addr);
|
|
1256 |
ldr(r0, Address(r0, 4 * wordSize));
|
|
1257 |
} else {
|
|
1258 |
ldr(r0, addr);
|
|
1259 |
}
|
|
1260 |
mov(rscratch1, (address)b);
|
|
1261 |
|
|
1262 |
// call indirectly to solve generation ordering problem
|
|
1263 |
lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
|
|
1264 |
ldr(rscratch2, Address(rscratch2));
|
|
1265 |
blr(rscratch2);
|
|
1266 |
|
|
1267 |
ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
|
|
1268 |
ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
|
|
1269 |
|
|
1270 |
BLOCK_COMMENT("} verify_oop_addr");
|
|
1271 |
}
|
|
1272 |
|
|
1273 |
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
|
|
1274 |
int extra_slot_offset) {
|
|
1275 |
// cf. TemplateTable::prepare_invoke(), if (load_receiver).
|
|
1276 |
int stackElementSize = Interpreter::stackElementSize;
|
|
1277 |
int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
|
|
1278 |
#ifdef ASSERT
|
|
1279 |
int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
|
|
1280 |
assert(offset1 - offset == stackElementSize, "correct arithmetic");
|
|
1281 |
#endif
|
|
1282 |
if (arg_slot.is_constant()) {
|
|
1283 |
return Address(esp, arg_slot.as_constant() * stackElementSize
|
|
1284 |
+ offset);
|
|
1285 |
} else {
|
|
1286 |
add(rscratch1, esp, arg_slot.as_register(),
|
|
1287 |
ext::uxtx, exact_log2(stackElementSize));
|
|
1288 |
return Address(rscratch1, offset);
|
|
1289 |
}
|
|
1290 |
}
|
|
1291 |
|
|
1292 |
void MacroAssembler::call_VM_leaf_base(address entry_point,
|
|
1293 |
int number_of_arguments,
|
|
1294 |
Label *retaddr) {
|
|
1295 |
call_VM_leaf_base1(entry_point, number_of_arguments, 0, ret_type_integral, retaddr);
|
|
1296 |
}
|
|
1297 |
|
|
1298 |
void MacroAssembler::call_VM_leaf_base1(address entry_point,
|
|
1299 |
int number_of_gp_arguments,
|
|
1300 |
int number_of_fp_arguments,
|
|
1301 |
ret_type type,
|
|
1302 |
Label *retaddr) {
|
|
1303 |
Label E, L;
|
|
1304 |
|
|
1305 |
stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));
|
|
1306 |
|
|
1307 |
// We add 1 to number_of_gp_arguments because the thread in arg0 is
|
|
1308 |
// not counted
|
|
1309 |
mov(rscratch1, entry_point);
|
|
1310 |
blrt(rscratch1, number_of_gp_arguments + 1, number_of_fp_arguments, type);
|
|
1311 |
if (retaddr)
|
|
1312 |
bind(*retaddr);
|
|
1313 |
|
|
1314 |
ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
|
|
1315 |
maybe_isb();
|
|
1316 |
}
|
|
1317 |
|
|
1318 |
void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
|
|
1319 |
call_VM_leaf_base(entry_point, number_of_arguments);
|
|
1320 |
}
|
|
1321 |
|
|
1322 |
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
|
|
1323 |
pass_arg0(this, arg_0);
|
|
1324 |
call_VM_leaf_base(entry_point, 1);
|
|
1325 |
}
|
|
1326 |
|
|
1327 |
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
|
|
1328 |
pass_arg0(this, arg_0);
|
|
1329 |
pass_arg1(this, arg_1);
|
|
1330 |
call_VM_leaf_base(entry_point, 2);
|
|
1331 |
}
|
|
1332 |
|
|
1333 |
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
|
|
1334 |
Register arg_1, Register arg_2) {
|
|
1335 |
pass_arg0(this, arg_0);
|
|
1336 |
pass_arg1(this, arg_1);
|
|
1337 |
pass_arg2(this, arg_2);
|
|
1338 |
call_VM_leaf_base(entry_point, 3);
|
|
1339 |
}
|
|
1340 |
|
|
1341 |
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
|
|
1342 |
pass_arg0(this, arg_0);
|
|
1343 |
MacroAssembler::call_VM_leaf_base(entry_point, 1);
|
|
1344 |
}
|
|
1345 |
|
|
1346 |
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
|
|
1347 |
|
|
1348 |
assert(arg_0 != c_rarg1, "smashed arg");
|
|
1349 |
pass_arg1(this, arg_1);
|
|
1350 |
pass_arg0(this, arg_0);
|
|
1351 |
MacroAssembler::call_VM_leaf_base(entry_point, 2);
|
|
1352 |
}
|
|
1353 |
|
|
1354 |
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
|
|
1355 |
assert(arg_0 != c_rarg2, "smashed arg");
|
|
1356 |
assert(arg_1 != c_rarg2, "smashed arg");
|
|
1357 |
pass_arg2(this, arg_2);
|
|
1358 |
assert(arg_0 != c_rarg1, "smashed arg");
|
|
1359 |
pass_arg1(this, arg_1);
|
|
1360 |
pass_arg0(this, arg_0);
|
|
1361 |
MacroAssembler::call_VM_leaf_base(entry_point, 3);
|
|
1362 |
}
|
|
1363 |
|
|
1364 |
void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
|
|
1365 |
assert(arg_0 != c_rarg3, "smashed arg");
|
|
1366 |
assert(arg_1 != c_rarg3, "smashed arg");
|
|
1367 |
assert(arg_2 != c_rarg3, "smashed arg");
|
|
1368 |
pass_arg3(this, arg_3);
|
|
1369 |
assert(arg_0 != c_rarg2, "smashed arg");
|
|
1370 |
assert(arg_1 != c_rarg2, "smashed arg");
|
|
1371 |
pass_arg2(this, arg_2);
|
|
1372 |
assert(arg_0 != c_rarg1, "smashed arg");
|
|
1373 |
pass_arg1(this, arg_1);
|
|
1374 |
pass_arg0(this, arg_0);
|
|
1375 |
MacroAssembler::call_VM_leaf_base(entry_point, 4);
|
|
1376 |
}
|
|
1377 |
|
|
1378 |
void MacroAssembler::null_check(Register reg, int offset) {
|
|
1379 |
if (needs_explicit_null_check(offset)) {
|
|
1380 |
// provoke OS NULL exception if reg = NULL by
|
|
1381 |
// accessing M[reg] w/o changing any registers
|
|
1382 |
// NOTE: this is plenty to provoke a segv
|
|
1383 |
ldr(zr, Address(reg));
|
|
1384 |
} else {
|
|
1385 |
// nothing to do, (later) access of M[reg + offset]
|
|
1386 |
// will provoke OS NULL exception if reg = NULL
|
|
1387 |
}
|
|
1388 |
}
|
|
1389 |
|
|
1390 |
// MacroAssembler protected routines needed to implement
|
|
1391 |
// public methods
|
|
1392 |
|
|
1393 |
void MacroAssembler::mov(Register r, Address dest) {
|
|
1394 |
code_section()->relocate(pc(), dest.rspec());
|
|
1395 |
u_int64_t imm64 = (u_int64_t)dest.target();
|
|
1396 |
movptr(r, imm64);
|
|
1397 |
}
|
|
1398 |
|
|
1399 |
// Move a constant pointer into r. In AArch64 mode the virtual
|
|
1400 |
// address space is 48 bits in size, so we only need three
|
|
1401 |
// instructions to create a patchable instruction sequence that can
|
|
1402 |
// reach anywhere.
|
|
1403 |
void MacroAssembler::movptr(Register r, uintptr_t imm64) {
|
|
1404 |
#ifndef PRODUCT
|
|
1405 |
{
|
|
1406 |
char buffer[64];
|
|
1407 |
snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64);
|
|
1408 |
block_comment(buffer);
|
|
1409 |
}
|
|
1410 |
#endif
|
|
1411 |
assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
|
|
1412 |
movz(r, imm64 & 0xffff);
|
|
1413 |
imm64 >>= 16;
|
|
1414 |
movk(r, imm64 & 0xffff, 16);
|
|
1415 |
imm64 >>= 16;
|
|
1416 |
movk(r, imm64 & 0xffff, 32);
|
|
1417 |
}
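// For example, movptr(r0, 0x123456789abcul) emits a sequence along the
// lines of
//   movz x0, #0x9abc
//   movk x0, #0x5678, lsl #16
//   movk x0, #0x1234, lsl #32
// Emitting all three instructions even when the upper halfwords are zero
// keeps the sequence a fixed length, which is what makes it patchable.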
|
|
1418 |
|
|
1419 |
void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
|
|
1420 |
{
|
|
1421 |
#ifndef PRODUCT
|
|
1422 |
{
|
|
1423 |
char buffer[64];
|
|
1424 |
snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64);
|
|
1425 |
block_comment(buffer);
|
|
1426 |
}
|
|
1427 |
#endif
|
|
1428 |
if (operand_valid_for_logical_immediate(false, imm64)) {
|
|
1429 |
orr(dst, zr, imm64);
|
|
1430 |
} else {
|
|
1431 |
// we can use a combination of MOVZ or MOVN with
|
|
1432 |
// MOVK to build up the constant
|
|
1433 |
u_int64_t imm_h[4];
|
|
1434 |
int zero_count = 0;
|
|
1435 |
int neg_count = 0;
|
|
1436 |
int i;
|
|
1437 |
for (i = 0; i < 4; i++) {
|
|
1438 |
imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL);
|
|
1439 |
if (imm_h[i] == 0) {
|
|
1440 |
zero_count++;
|
|
1441 |
} else if (imm_h[i] == 0xffffL) {
|
|
1442 |
neg_count++;
|
|
1443 |
}
|
|
1444 |
}
|
|
1445 |
if (zero_count == 4) {
|
|
1446 |
// one MOVZ will do
|
|
1447 |
movz(dst, 0);
|
|
1448 |
} else if (neg_count == 4) {
|
|
1449 |
// one MOVN will do
|
|
1450 |
movn(dst, 0);
|
|
1451 |
} else if (zero_count == 3) {
|
|
1452 |
for (i = 0; i < 4; i++) {
|
|
1453 |
if (imm_h[i] != 0L) {
|
|
1454 |
movz(dst, (u_int32_t)imm_h[i], (i << 4));
|
|
1455 |
break;
|
|
1456 |
}
|
|
1457 |
}
|
|
1458 |
} else if (neg_count == 3) {
|
|
1459 |
// one MOVN will do
|
|
1460 |
for (int i = 0; i < 4; i++) {
|
|
1461 |
if (imm_h[i] != 0xffffL) {
|
|
1462 |
movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
|
|
1463 |
break;
|
|
1464 |
}
|
|
1465 |
}
|
|
1466 |
} else if (zero_count == 2) {
|
|
1467 |
// one MOVZ and one MOVK will do
|
|
1468 |
for (i = 0; i < 3; i++) {
|
|
1469 |
if (imm_h[i] != 0L) {
|
|
1470 |
movz(dst, (u_int32_t)imm_h[i], (i << 4));
|
|
1471 |
i++;
|
|
1472 |
break;
|
|
1473 |
}
|
|
1474 |
}
|
|
1475 |
for (;i < 4; i++) {
|
|
1476 |
if (imm_h[i] != 0L) {
|
|
1477 |
movk(dst, (u_int32_t)imm_h[i], (i << 4));
|
|
1478 |
}
|
|
1479 |
}
|
|
1480 |
} else if (neg_count == 2) {
|
|
1481 |
// one MOVN and one MOVK will do
|
|
1482 |
for (i = 0; i < 4; i++) {
|
|
1483 |
if (imm_h[i] != 0xffffL) {
|
|
1484 |
movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
|
|
1485 |
i++;
|
|
1486 |
break;
|
|
1487 |
}
|
|
1488 |
}
|
|
1489 |
for (;i < 4; i++) {
|
|
1490 |
if (imm_h[i] != 0xffffL) {
|
|
1491 |
movk(dst, (u_int32_t)imm_h[i], (i << 4));
|
|
1492 |
}
|
|
1493 |
}
|
|
1494 |
} else if (zero_count == 1) {
|
|
1495 |
// one MOVZ and two MOVKs will do
|
|
1496 |
for (i = 0; i < 4; i++) {
|
|
1497 |
if (imm_h[i] != 0L) {
|
|
1498 |
movz(dst, (u_int32_t)imm_h[i], (i << 4));
|
|
1499 |
i++;
|
|
1500 |
break;
|
|
1501 |
}
|
|
1502 |
}
|
|
1503 |
for (;i < 4; i++) {
|
|
1504 |
if (imm_h[i] != 0x0L) {
|
|
1505 |
movk(dst, (u_int32_t)imm_h[i], (i << 4));
|
|
1506 |
}
|
|
1507 |
}
|
|
1508 |
} else if (neg_count == 1) {
|
|
1509 |
// one MOVN and two MOVKs will do
|
|
1510 |
for (i = 0; i < 4; i++) {
|
|
1511 |
if (imm_h[i] != 0xffffL) {
|
|
1512 |
movn(dst, (u_int32_t)imm_h[i] ^ 0xffffL, (i << 4));
|
|
1513 |
i++;
|
|
1514 |
break;
|
|
1515 |
}
|
|
1516 |
}
|
|
1517 |
for (;i < 4; i++) {
|
|
1518 |
if (imm_h[i] != 0xffffL) {
|
|
1519 |
movk(dst, (u_int32_t)imm_h[i], (i << 4));
|
|
1520 |
}
|
|
1521 |
}
|
|
1522 |
} else {
|
|
1523 |
// use a MOVZ and 3 MOVKs (makes it easier to debug)
|
|
1524 |
movz(dst, (u_int32_t)imm_h[0], 0);
|
|
1525 |
for (i = 1; i < 4; i++) {
|
|
1526 |
movk(dst, (u_int32_t)imm_h[i], (i << 4));
|
|
1527 |
}
|
|
1528 |
}
|
|
1529 |
}
|
|
1530 |
}
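// Two illustrative cases of the halfword analysis above:
//   imm64 = 0x00000000ffff1234  -> halfwords { 0x1234, 0xffff, 0, 0 },
//     zero_count == 2, so:  movz dst, #0x1234;  movk dst, #0xffff, lsl #16
//   imm64 = 0xffffffffffff1234  -> neg_count == 3, so a single
//     movn dst, #0xedcb   (MOVN writes the bitwise NOT of its operand)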
|
|
1531 |
|
|
1532 |
void MacroAssembler::mov_immediate32(Register dst, u_int32_t imm32)
|
|
1533 |
{
|
|
1534 |
#ifndef PRODUCT
|
|
1535 |
{
|
|
1536 |
char buffer[64];
|
|
1537 |
snprintf(buffer, sizeof(buffer), "0x%"PRIX32, imm32);
|
|
1538 |
block_comment(buffer);
|
|
1539 |
}
|
|
1540 |
#endif
|
|
1541 |
if (operand_valid_for_logical_immediate(true, imm32)) {
|
|
1542 |
orrw(dst, zr, imm32);
|
|
1543 |
} else {
|
|
1544 |
// we can use MOVZ, MOVN or two calls to MOVK to build up the
|
|
1545 |
// constant
|
|
1546 |
u_int32_t imm_h[2];
|
|
1547 |
imm_h[0] = imm32 & 0xffff;
|
|
1548 |
imm_h[1] = ((imm32 >> 16) & 0xffff);
|
|
1549 |
if (imm_h[0] == 0) {
|
|
1550 |
movzw(dst, imm_h[1], 16);
|
|
1551 |
} else if (imm_h[0] == 0xffff) {
|
|
1552 |
movnw(dst, imm_h[1] ^ 0xffff, 16);
|
|
1553 |
} else if (imm_h[1] == 0) {
|
|
1554 |
movzw(dst, imm_h[0], 0);
|
|
1555 |
} else if (imm_h[1] == 0xffff) {
|
|
1556 |
movnw(dst, imm_h[0] ^ 0xffff, 0);
|
|
1557 |
} else {
|
|
1558 |
// use a MOVZ and MOVK (makes it easier to debug)
|
|
1559 |
movzw(dst, imm_h[0], 0);
|
|
1560 |
movkw(dst, imm_h[1], 16);
|
|
1561 |
}
|
|
1562 |
}
|
|
1563 |
}
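// E.g. imm32 = 0x12345678 is not a valid logical immediate and neither
// halfword is 0 or 0xffff, so the fallback emits
//   movzw(dst, 0x5678, 0);  movkw(dst, 0x1234, 16);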
|
|
1564 |
|
|
1565 |
// Form an address from base + offset in Rd. Rd may or may
|
|
1566 |
// not actually be used: you must use the Address that is returned.
|
|
1567 |
// It is up to you to ensure that the shift provided matches the size
|
|
1568 |
// of your data.
|
|
1569 |
Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) {
|
|
1570 |
if (Address::offset_ok_for_immed(byte_offset, shift))
|
|
1571 |
// It fits; no need for any heroics
|
|
1572 |
return Address(base, byte_offset);
|
|
1573 |
|
|
1574 |
// Don't do anything clever with negative or misaligned offsets
|
|
1575 |
unsigned mask = (1 << shift) - 1;
|
|
1576 |
if (byte_offset < 0 || byte_offset & mask) {
|
|
1577 |
mov(Rd, byte_offset);
|
|
1578 |
add(Rd, base, Rd);
|
|
1579 |
return Address(Rd);
|
|
1580 |
}
|
|
1581 |
|
|
1582 |
// See if we can do this with two 12-bit offsets
|
|
1583 |
{
|
|
1584 |
unsigned long word_offset = byte_offset >> shift;
|
|
1585 |
unsigned long masked_offset = word_offset & 0xfff000;
|
|
1586 |
if (Address::offset_ok_for_immed(word_offset - masked_offset)
|
|
1587 |
&& Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
|
|
1588 |
add(Rd, base, masked_offset << shift);
|
|
1589 |
word_offset -= masked_offset;
|
|
1590 |
return Address(Rd, word_offset << shift);
|
|
1591 |
}
|
|
1592 |
}
|
|
1593 |
|
|
1594 |
// Do it the hard way
|
|
1595 |
mov(Rd, byte_offset);
|
|
1596 |
add(Rd, base, Rd);
|
|
1597 |
return Address(Rd);
|
|
1598 |
}
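// Worked example, assuming shift == 3 (8-byte data): for
// byte_offset == 0x13458 the scaled word offset is 0x268b; 0x2000 of that
// fits the shifted add-immediate and the remaining 0x68b fits the scaled
// load/store offset, so the result is
//   add(Rd, base, 0x2000 << 3);       // base + 0x10000
//   return Address(Rd, 0x68b << 3);   // Rd + 0x3458 == base + 0x13458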
|
|
1599 |
|
|
1600 |
void MacroAssembler::atomic_incw(Register counter_addr, Register tmp) {
|
|
1601 |
Label retry_load;
|
|
1602 |
bind(retry_load);
|
|
1603 |
// flush and load exclusive from the memory location
|
|
1604 |
ldxrw(tmp, counter_addr);
|
|
1605 |
addw(tmp, tmp, 1);
|
|
1606 |
// if we store+flush with no intervening write tmp will be zero
|
|
1607 |
stxrw(tmp, tmp, counter_addr);
|
|
1608 |
cbnzw(tmp, retry_load);
|
|
1609 |
}
|
|
1610 |
|
|
1611 |
|
|
1612 |
int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb,
|
|
1613 |
bool want_remainder, Register scratch)
|
|
1614 |
{
|
|
1615 |
// Full implementation of Java idiv and irem. The function
|
|
1616 |
// returns the (pc) offset of the div instruction - may be needed
|
|
1617 |
// for implicit exceptions.
|
|
1618 |
//
|
|
1619 |
// constraint : ra/rb =/= scratch
|
|
1620 |
// normal case
|
|
1621 |
//
|
|
1622 |
// input : ra: dividend
|
|
1623 |
// rb: divisor
|
|
1624 |
//
|
|
1625 |
// result: either
|
|
1626 |
// quotient (= ra idiv rb)
|
|
1627 |
// remainder (= ra irem rb)
|
|
1628 |
|
|
1629 |
assert(ra != scratch && rb != scratch, "reg cannot be scratch");
|
|
1630 |
|
|
1631 |
int idivl_offset = offset();
|
|
1632 |
if (! want_remainder) {
|
|
1633 |
sdivw(result, ra, rb);
|
|
1634 |
} else {
|
|
1635 |
sdivw(scratch, ra, rb);
|
|
1636 |
msubw(result, scratch, rb, ra);
|
|
1637 |
}
|
|
1638 |
|
|
1639 |
return idivl_offset;
|
|
1640 |
}
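// Note that, unlike x86, no fix-up is needed around the division itself:
// AArch64 SDIV yields INT_MIN for the overflowing INT_MIN / -1 case
// (matching Java semantics, with MSUB then producing a remainder of 0)
// and does not trap on a zero divisor, so callers must test for zero
// themselves.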
|
|
1641 |
|
|
1642 |
int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
|
|
1643 |
bool want_remainder, Register scratch)
|
|
1644 |
{
|
|
1645 |
// Full implementation of Java ldiv and lrem. The function
|
|
1646 |
// returns the (pc) offset of the div instruction - may be needed
|
|
1647 |
// for implicit exceptions.
|
|
1648 |
//
|
|
1649 |
// constraint : ra/rb =/= scratch
|
|
1650 |
// normal case
|
|
1651 |
//
|
|
1652 |
// input : ra: dividend
|
|
1653 |
// rb: divisor
|
|
1654 |
//
|
|
1655 |
// result: either
|
|
1656 |
// quotient (= ra idiv rb)
|
|
1657 |
// remainder (= ra irem rb)
|
|
1658 |
|
|
1659 |
assert(ra != scratch && rb != scratch, "reg cannot be scratch");
|
|
1660 |
|
|
1661 |
int idivq_offset = offset();
|
|
1662 |
if (! want_remainder) {
|
|
1663 |
sdiv(result, ra, rb);
|
|
1664 |
} else {
|
|
1665 |
sdiv(scratch, ra, rb);
|
|
1666 |
msub(result, scratch, rb, ra);
|
|
1667 |
}
|
|
1668 |
|
|
1669 |
return idivq_offset;
|
|
1670 |
}
|
|
1671 |
|
|
1672 |
// MacroAssembler routines found actually to be needed
|
|
1673 |
|
|
1674 |
void MacroAssembler::push(Register src)
|
|
1675 |
{
|
|
1676 |
str(src, Address(pre(esp, -1 * wordSize)));
|
|
1677 |
}
|
|
1678 |
|
|
1679 |
void MacroAssembler::pop(Register dst)
|
|
1680 |
{
|
|
1681 |
ldr(dst, Address(post(esp, 1 * wordSize)));
|
|
1682 |
}
|
|
1683 |
|
|
1684 |
// Note: load_unsigned_short used to be called load_unsigned_word.
|
|
1685 |
int MacroAssembler::load_unsigned_short(Register dst, Address src) {
|
|
1686 |
int off = offset();
|
|
1687 |
ldrh(dst, src);
|
|
1688 |
return off;
|
|
1689 |
}
|
|
1690 |
|
|
1691 |
int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
|
|
1692 |
int off = offset();
|
|
1693 |
ldrb(dst, src);
|
|
1694 |
return off;
|
|
1695 |
}
|
|
1696 |
|
|
1697 |
int MacroAssembler::load_signed_short(Register dst, Address src) {
|
|
1698 |
int off = offset();
|
|
1699 |
ldrsh(dst, src);
|
|
1700 |
return off;
|
|
1701 |
}
|
|
1702 |
|
|
1703 |
int MacroAssembler::load_signed_byte(Register dst, Address src) {
|
|
1704 |
int off = offset();
|
|
1705 |
ldrsb(dst, src);
|
|
1706 |
return off;
|
|
1707 |
}
|
|
1708 |
|
|
1709 |
int MacroAssembler::load_signed_short32(Register dst, Address src) {
|
|
1710 |
int off = offset();
|
|
1711 |
ldrshw(dst, src);
|
|
1712 |
return off;
|
|
1713 |
}
|
|
1714 |
|
|
1715 |
int MacroAssembler::load_signed_byte32(Register dst, Address src) {
|
|
1716 |
int off = offset();
|
|
1717 |
ldrsbw(dst, src);
|
|
1718 |
return off;
|
|
1719 |
}
|
|
1720 |
|
|
1721 |
void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
|
|
1722 |
switch (size_in_bytes) {
|
|
1723 |
case 8: ldr(dst, src); break;
|
|
1724 |
case 4: ldrw(dst, src); break;
|
|
1725 |
case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
|
|
1726 |
case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
|
|
1727 |
default: ShouldNotReachHere();
|
|
1728 |
}
|
|
1729 |
}
|
|
1730 |
|
|
1731 |
void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
|
|
1732 |
switch (size_in_bytes) {
|
|
1733 |
case 8: str(src, dst); break;
|
|
1734 |
case 4: strw(src, dst); break;
|
|
1735 |
case 2: strh(src, dst); break;
|
|
1736 |
case 1: strb(src, dst); break;
|
|
1737 |
default: ShouldNotReachHere();
|
|
1738 |
}
|
|
1739 |
}
|
|
1740 |
|
|
1741 |
void MacroAssembler::decrementw(Register reg, int value)
|
|
1742 |
{
|
|
1743 |
if (value < 0) { incrementw(reg, -value); return; }
|
|
1744 |
if (value == 0) { return; }
|
|
1745 |
if (value < (1 << 12)) { subw(reg, reg, value); return; }
|
|
1746 |
/* else */ {
|
|
1747 |
guarantee(reg != rscratch2, "invalid dst for register decrement");
|
|
1748 |
movw(rscratch2, (unsigned)value);
|
|
1749 |
subw(reg, reg, rscratch2);
|
|
1750 |
}
|
|
1751 |
}
|
|
1752 |
|
|
1753 |
void MacroAssembler::decrement(Register reg, int value)
|
|
1754 |
{
|
|
1755 |
if (value < 0) { increment(reg, -value); return; }
|
|
1756 |
if (value == 0) { return; }
|
|
1757 |
if (value < (1 << 12)) { sub(reg, reg, value); return; }
|
|
1758 |
/* else */ {
|
|
1759 |
assert(reg != rscratch2, "invalid dst for register decrement");
|
|
1760 |
mov(rscratch2, (unsigned long)value);
|
|
1761 |
sub(reg, reg, rscratch2);
|
|
1762 |
}
|
|
1763 |
}
|
|
1764 |
|
|
1765 |
void MacroAssembler::decrementw(Address dst, int value)
|
|
1766 |
{
|
|
1767 |
assert(!dst.uses(rscratch1), "invalid dst for address decrement");
|
|
1768 |
ldrw(rscratch1, dst);
|
|
1769 |
decrementw(rscratch1, value);
|
|
1770 |
strw(rscratch1, dst);
|
|
1771 |
}
|
|
1772 |
|
|
1773 |
void MacroAssembler::decrement(Address dst, int value)
|
|
1774 |
{
|
|
1775 |
assert(!dst.uses(rscratch1), "invalid address for decrement");
|
|
1776 |
ldr(rscratch1, dst);
|
|
1777 |
decrement(rscratch1, value);
|
|
1778 |
str(rscratch1, dst);
|
|
1779 |
}
|
|
1780 |
|
|
1781 |
void MacroAssembler::incrementw(Register reg, int value)
|
|
1782 |
{
|
|
1783 |
if (value < 0) { decrementw(reg, -value); return; }
|
|
1784 |
if (value == 0) { return; }
|
|
1785 |
if (value < (1 << 12)) { addw(reg, reg, value); return; }
|
|
1786 |
/* else */ {
|
|
1787 |
assert(reg != rscratch2, "invalid dst for register increment");
|
|
1788 |
movw(rscratch2, (unsigned)value);
|
|
1789 |
addw(reg, reg, rscratch2);
|
|
1790 |
}
|
|
1791 |
}
|
|
1792 |
|
|
1793 |
void MacroAssembler::increment(Register reg, int value)
|
|
1794 |
{
|
|
1795 |
if (value < 0) { decrement(reg, -value); return; }
|
|
1796 |
if (value == 0) { return; }
|
|
1797 |
if (value < (1 << 12)) { add(reg, reg, value); return; }
|
|
1798 |
/* else */ {
|
|
1799 |
assert(reg != rscratch2, "invalid dst for register increment");
|
|
1800 |
movw(rscratch2, (unsigned)value);
|
|
1801 |
add(reg, reg, rscratch2);
|
|
1802 |
}
|
|
1803 |
}
|
|
1804 |
|
|
1805 |
void MacroAssembler::incrementw(Address dst, int value)
|
|
1806 |
{
|
|
1807 |
assert(!dst.uses(rscratch1), "invalid dst for address increment");
|
|
1808 |
ldrw(rscratch1, dst);
|
|
1809 |
incrementw(rscratch1, value);
|
|
1810 |
strw(rscratch1, dst);
|
|
1811 |
}
|
|
1812 |
|
|
1813 |
void MacroAssembler::increment(Address dst, int value)
|
|
1814 |
{
|
|
1815 |
assert(!dst.uses(rscratch1), "invalid dst for address increment");
|
|
1816 |
ldr(rscratch1, dst);
|
|
1817 |
increment(rscratch1, value);
|
|
1818 |
str(rscratch1, dst);
|
|
1819 |
}
|
|
1820 |
|
|
1821 |
|
|
1822 |
void MacroAssembler::pusha() {
|
|
1823 |
push(0x7fffffff, sp);
|
|
1824 |
}
|
|
1825 |
|
|
1826 |
void MacroAssembler::popa() {
|
|
1827 |
pop(0x7fffffff, sp);
|
|
1828 |
}
|
|
1829 |
|
|
1830 |
// Push lots of registers in the bit set supplied. Don't push sp.
|
|
1831 |
// Return the number of words pushed
|
|
1832 |
int MacroAssembler::push(unsigned int bitset, Register stack) {
|
|
1833 |
int words_pushed = 0;
|
|
1834 |
|
|
1835 |
// Scan bitset to accumulate register pairs
|
|
1836 |
unsigned char regs[32];
|
|
1837 |
int count = 0;
|
|
1838 |
for (int reg = 0; reg <= 30; reg++) {
|
|
1839 |
if (1 & bitset)
|
|
1840 |
regs[count++] = reg;
|
|
1841 |
bitset >>= 1;
|
|
1842 |
}
|
|
1843 |
regs[count++] = zr->encoding_nocheck();
|
|
1844 |
count &= ~1; // Only push an even number of regs
|
|
1845 |
|
|
1846 |
if (count) {
|
|
1847 |
stp(as_Register(regs[0]), as_Register(regs[1]),
|
|
1848 |
Address(pre(stack, -count * wordSize)));
|
|
1849 |
words_pushed += 2;
|
|
1850 |
}
|
|
1851 |
for (int i = 2; i < count; i += 2) {
|
|
1852 |
stp(as_Register(regs[i]), as_Register(regs[i+1]),
|
|
1853 |
Address(stack, i * wordSize));
|
|
1854 |
words_pushed += 2;
|
|
1855 |
}
|
|
1856 |
|
|
1857 |
assert(words_pushed == count, "oops, pushed != count");
|
|
1858 |
|
|
1859 |
return count;
|
|
1860 |
}
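// For example, push(0b1110, sp) collects { r1, r2, r3 }, pads with zr to
// keep the count even, and emits
//   stp(r1, r2, Address(pre(sp, -4 * wordSize)));
//   stp(r3, zr, Address(sp, 2 * wordSize));
// returning 4.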
|
|
1861 |
|
|
1862 |
int MacroAssembler::pop(unsigned int bitset, Register stack) {
|
|
1863 |
int words_pushed = 0;
|
|
1864 |
|
|
1865 |
// Scan bitset to accumulate register pairs
|
|
1866 |
unsigned char regs[32];
|
|
1867 |
int count = 0;
|
|
1868 |
for (int reg = 0; reg <= 30; reg++) {
|
|
1869 |
if (1 & bitset)
|
|
1870 |
regs[count++] = reg;
|
|
1871 |
bitset >>= 1;
|
|
1872 |
}
|
|
1873 |
regs[count++] = zr->encoding_nocheck();
|
|
1874 |
count &= ~1;
|
|
1875 |
|
|
1876 |
for (int i = 2; i < count; i += 2) {
|
|
1877 |
ldp(as_Register(regs[i]), as_Register(regs[i+1]),
|
|
1878 |
Address(stack, i * wordSize));
|
|
1879 |
words_pushed += 2;
|
|
1880 |
}
|
|
1881 |
if (count) {
|
|
1882 |
ldp(as_Register(regs[0]), as_Register(regs[1]),
|
|
1883 |
Address(post(stack, count * wordSize)));
|
|
1884 |
words_pushed += 2;
|
|
1885 |
}
|
|
1886 |
|
|
1887 |
assert(words_pushed == count, "oops, pushed != count");
|
|
1888 |
|
|
1889 |
return count;
|
|
1890 |
}
|
|
1891 |
#ifdef ASSERT
|
|
1892 |
void MacroAssembler::verify_heapbase(const char* msg) {
|
|
1893 |
#if 0
|
|
1894 |
assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed");
|
|
1895 |
assert (Universe::heap() != NULL, "java heap should be initialized");
|
|
1896 |
if (CheckCompressedOops) {
|
|
1897 |
Label ok;
|
|
1898 |
push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1
|
|
1899 |
cmpptr(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
|
|
1900 |
br(Assembler::EQ, ok);
|
|
1901 |
stop(msg);
|
|
1902 |
bind(ok);
|
|
1903 |
pop(1 << rscratch1->encoding(), sp);
|
|
1904 |
}
|
|
1905 |
#endif
|
|
1906 |
}
|
|
1907 |
#endif
|
|
1908 |
|
|
1909 |
void MacroAssembler::stop(const char* msg) {
|
|
1910 |
address ip = pc();
|
|
1911 |
pusha();
|
|
1912 |
mov(c_rarg0, (address)msg);
|
|
1913 |
mov(c_rarg1, (address)ip);
|
|
1914 |
mov(c_rarg2, sp);
|
|
1915 |
mov(c_rarg3, CAST_FROM_FN_PTR(address, MacroAssembler::debug64));
|
|
1916 |
// call(c_rarg3);
|
|
1917 |
blrt(c_rarg3, 3, 0, 1);
|
|
1918 |
hlt(0);
|
|
1919 |
}
|
|
1920 |
|
|
1921 |
// If a constant does not fit in an immediate field, generate some
|
|
1922 |
// number of MOV instructions and then perform the operation.
|
|
1923 |
void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
|
|
1924 |
add_sub_imm_insn insn1,
|
|
1925 |
add_sub_reg_insn insn2) {
|
|
1926 |
assert(Rd != zr, "Rd = zr and not setting flags?");
|
|
1927 |
if (operand_valid_for_add_sub_immediate((int)imm)) {
|
|
1928 |
(this->*insn1)(Rd, Rn, imm);
|
|
1929 |
} else {
|
|
1930 |
if (uabs(imm) < (1 << 24)) {
|
|
1931 |
(this->*insn1)(Rd, Rn, imm & -(1 << 12));
|
|
1932 |
(this->*insn1)(Rd, Rd, imm & ((1 << 12)-1));
|
|
1933 |
} else {
|
|
1934 |
assert_different_registers(Rd, Rn);
|
|
1935 |
mov(Rd, (uint64_t)imm);
|
|
1936 |
(this->*insn2)(Rd, Rn, Rd, LSL, 0);
|
|
1937 |
}
|
|
1938 |
}
|
|
1939 |
}
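// E.g. with an add for insn1, the out-of-range immediate 0x123456
// (< 1 << 24) is split into two legal add-immediates:
//   add(Rd, Rn, 0x123000);   // 12-bit immediate, LSL #12 form
//   add(Rd, Rd, 0x456);      // plain 12-bit immediate
// Anything larger falls back to materialising the constant in Rd first.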
|
|
1940 |
|
|
1941 |
// Separate version which sets the flags. Optimisations are more restricted
|
|
1942 |
// because we must set the flags correctly.
|
|
1943 |
void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
|
|
1944 |
add_sub_imm_insn insn1,
|
|
1945 |
add_sub_reg_insn insn2) {
|
|
1946 |
if (operand_valid_for_add_sub_immediate((int)imm)) {
|
|
1947 |
(this->*insn1)(Rd, Rn, imm);
|
|
1948 |
} else {
|
|
1949 |
assert_different_registers(Rd, Rn);
|
|
1950 |
assert(Rd != zr, "overflow in immediate operand");
|
|
1951 |
mov(Rd, (uint64_t)imm);
|
|
1952 |
(this->*insn2)(Rd, Rn, Rd, LSL, 0);
|
|
1953 |
}
|
|
1954 |
}
|
|
1955 |
|
|
1956 |
|
|
1957 |
void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) {
|
|
1958 |
if (increment.is_register()) {
|
|
1959 |
add(Rd, Rn, increment.as_register());
|
|
1960 |
} else {
|
|
1961 |
add(Rd, Rn, increment.as_constant());
|
|
1962 |
}
|
|
1963 |
}
|
|
1964 |
|
|
1965 |
void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) {
|
|
1966 |
if (increment.is_register()) {
|
|
1967 |
addw(Rd, Rn, increment.as_register());
|
|
1968 |
} else {
|
|
1969 |
addw(Rd, Rn, increment.as_constant());
|
|
1970 |
}
|
|
1971 |
}
|
|
1972 |
|
|
1973 |
void MacroAssembler::reinit_heapbase()
|
|
1974 |
{
|
|
1975 |
if (UseCompressedOops) {
|
|
1976 |
if (Universe::is_fully_initialized()) {
|
|
1977 |
mov(rheapbase, Universe::narrow_ptrs_base());
|
|
1978 |
} else {
|
|
1979 |
lea(rheapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
|
|
1980 |
ldr(rheapbase, Address(rheapbase));
|
|
1981 |
}
|
|
1982 |
}
|
|
1983 |
}
|
|
1984 |
|
|
1985 |
// this simulates the behaviour of the x86 cmpxchg instruction using a
|
|
1986 |
// load linked/store conditional pair. we use the acquire/release
|
|
1987 |
// versions of these instructions so that we flush pending writes as
|
|
1988 |
// per Java semantics.
|
|
1989 |
|
|
1990 |
// n.b the x86 version assumes the old value to be compared against is
|
|
1991 |
// in rax and updates rax with the value located in memory if the
|
|
1992 |
// cmpxchg fails. we supply a register for the old value explicitly
|
|
1993 |
|
|
1994 |
// the aarch64 load linked/store conditional instructions do not
|
|
1995 |
// accept an offset. so, unlike x86, we must provide a plain register
|
|
1996 |
// to identify the memory word to be compared/exchanged rather than a
|
|
1997 |
// register+offset Address.
|
|
1998 |
|
|
1999 |
void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
|
|
2000 |
Label &succeed, Label *fail) {
|
|
2001 |
// oldv holds comparison value
|
|
2002 |
// newv holds value to write in exchange
|
|
2003 |
// addr identifies memory word to compare against/update
|
|
2004 |
// tmp returns 0/1 for success/failure
|
|
2005 |
Label retry_load, nope;
|
|
2006 |
|
|
2007 |
bind(retry_load);
|
|
2008 |
// flush and load exclusive from the memory location
|
|
2009 |
// and fail if it is not what we expect
|
|
2010 |
ldaxr(tmp, addr);
|
|
2011 |
cmp(tmp, oldv);
|
|
2012 |
br(Assembler::NE, nope);
|
|
2013 |
// if we store+flush with no intervening write tmp will be zero
|
|
2014 |
stlxr(tmp, newv, addr);
|
|
2015 |
cbzw(tmp, succeed);
|
|
2016 |
// retry so that we only ever return after a load fails to compare;
|
|
2017 |
// this ensures we don't return a stale value after a failed write.
|
|
2018 |
b(retry_load);
|
|
2019 |
// if the memory word differs we return it in oldv and signal a fail
|
|
2020 |
bind(nope);
|
|
2021 |
membar(AnyAny);
|
|
2022 |
mov(oldv, tmp);
|
|
2023 |
if (fail)
|
|
2024 |
b(*fail);
|
|
2025 |
}
|
|
2026 |
|
|
2027 |
void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
|
|
2028 |
Label &succeed, Label *fail) {
|
|
2029 |
// oldv holds comparison value
|
|
2030 |
// newv holds value to write in exchange
|
|
2031 |
// addr identifies memory word to compare against/update
|
|
2032 |
// tmp returns 0/1 for success/failure
|
|
2033 |
Label retry_load, nope;
|
|
2034 |
|
|
2035 |
bind(retry_load);
|
|
2036 |
// flush and load exclusive from the memory location
|
|
2037 |
// and fail if it is not what we expect
|
|
2038 |
ldaxrw(tmp, addr);
|
|
2039 |
cmp(tmp, oldv);
|
|
2040 |
br(Assembler::NE, nope);
|
|
2041 |
// if we store+flush with no intervening write tmp will be zero
|
|
2042 |
stlxrw(tmp, newv, addr);
|
|
2043 |
cbzw(tmp, succeed);
|
|
2044 |
// retry so that we only ever return after a load fails to compare;
|
|
2045 |
// this ensures we don't return a stale value after a failed write.
|
|
2046 |
b(retry_load);
|
|
2047 |
// if the memory word differs we return it in oldv and signal a fail
|
|
2048 |
bind(nope);
|
|
2049 |
membar(AnyAny);
|
|
2050 |
mov(oldv, tmp);
|
|
2051 |
if (fail)
|
|
2052 |
b(*fail);
|
|
2053 |
}
|
|
2054 |
|
|
2055 |
static bool different(Register a, RegisterOrConstant b, Register c) {
|
|
2056 |
if (b.is_constant())
|
|
2057 |
return a != c;
|
|
2058 |
else
|
|
2059 |
return a != b.as_register() && a != c && b.as_register() != c;
|
|
2060 |
}
|
|
2061 |
|
|
2062 |
#define ATOMIC_OP(LDXR, OP, STXR) \
|
|
2063 |
void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Register addr) { \
|
|
2064 |
Register result = rscratch2; \
|
|
2065 |
if (prev->is_valid()) \
|
|
2066 |
result = different(prev, incr, addr) ? prev : rscratch2; \
|
|
2067 |
\
|
|
2068 |
Label retry_load; \
|
|
2069 |
bind(retry_load); \
|
|
2070 |
LDXR(result, addr); \
|
|
2071 |
OP(rscratch1, result, incr); \
|
|
2072 |
STXR(rscratch1, rscratch1, addr); \
|
|
2073 |
cbnzw(rscratch1, retry_load); \
|
|
2074 |
if (prev->is_valid() && prev != result) \
|
|
2075 |
mov(prev, result); \
|
|
2076 |
}
|
|
2077 |
|
|
2078 |
ATOMIC_OP(ldxr, add, stxr)
|
|
2079 |
ATOMIC_OP(ldxrw, addw, stxrw)
|
|
2080 |
|
|
2081 |
#undef ATOMIC_OP
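// The two instantiations above define atomic_add and atomic_addw, each a
// plain LDXR/op/STXR retry loop that returns the value previously held in
// memory via prev (when prev is a valid register).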
|
|
2082 |
|
|
2083 |
#define ATOMIC_XCHG(OP, LDXR, STXR) \
|
|
2084 |
void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
|
|
2085 |
Register result = rscratch2; \
|
|
2086 |
if (prev->is_valid()) \
|
|
2087 |
result = different(prev, newv, addr) ? prev : rscratch2; \
|
|
2088 |
\
|
|
2089 |
Label retry_load; \
|
|
2090 |
bind(retry_load); \
|
|
2091 |
LDXR(result, addr); \
|
|
2092 |
STXR(rscratch1, newv, addr); \
|
|
2093 |
cbnzw(rscratch1, retry_load); \
|
|
2094 |
if (prev->is_valid() && prev != result) \
|
|
2095 |
mov(prev, result); \
|
|
2096 |
}
|
|
2097 |
|
|
2098 |
ATOMIC_XCHG(xchg, ldxr, stxr)
|
|
2099 |
ATOMIC_XCHG(xchgw, ldxrw, stxrw)
|
|
2100 |
|
|
2101 |
#undef ATOMIC_XCHG
|
|
2102 |
|
|
2103 |
void MacroAssembler::incr_allocated_bytes(Register thread,
|
|
2104 |
Register var_size_in_bytes,
|
|
2105 |
int con_size_in_bytes,
|
|
2106 |
Register t1) {
|
|
2107 |
if (!thread->is_valid()) {
|
|
2108 |
thread = rthread;
|
|
2109 |
}
|
|
2110 |
assert(t1->is_valid(), "need temp reg");
|
|
2111 |
|
|
2112 |
ldr(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
|
|
2113 |
if (var_size_in_bytes->is_valid()) {
|
|
2114 |
add(t1, t1, var_size_in_bytes);
|
|
2115 |
} else {
|
|
2116 |
add(t1, t1, con_size_in_bytes);
|
|
2117 |
}
|
|
2118 |
str(t1, Address(thread, in_bytes(JavaThread::allocated_bytes_offset())));
|
|
2119 |
}
|
|
2120 |
|
|
2121 |
#ifndef PRODUCT
|
|
2122 |
extern "C" void findpc(intptr_t x);
|
|
2123 |
#endif
|
|
2124 |
|
|
2125 |
void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
|
|
2126 |
{
|
|
2127 |
// In order to get locks to work, we need to fake an in_VM state
|
|
2128 |
if (ShowMessageBoxOnError ) {
|
|
2129 |
JavaThread* thread = JavaThread::current();
|
|
2130 |
JavaThreadState saved_state = thread->thread_state();
|
|
2131 |
thread->set_thread_state(_thread_in_vm);
|
|
2132 |
#ifndef PRODUCT
|
|
2133 |
if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
|
|
2134 |
ttyLocker ttyl;
|
|
2135 |
BytecodeCounter::print();
|
|
2136 |
}
|
|
2137 |
#endif
|
|
2138 |
if (os::message_box(msg, "Execution stopped, print registers?")) {
|
|
2139 |
ttyLocker ttyl;
|
|
2140 |
tty->print_cr(" pc = 0x%016lx", pc);
|
|
2141 |
#ifndef PRODUCT
|
|
2142 |
tty->cr();
|
|
2143 |
findpc(pc);
|
|
2144 |
tty->cr();
|
|
2145 |
#endif
|
|
2146 |
tty->print_cr(" r0 = 0x%016lx", regs[0]);
|
|
2147 |
tty->print_cr(" r1 = 0x%016lx", regs[1]);
|
|
2148 |
tty->print_cr(" r2 = 0x%016lx", regs[2]);
|
|
2149 |
tty->print_cr(" r3 = 0x%016lx", regs[3]);
|
|
2150 |
tty->print_cr(" r4 = 0x%016lx", regs[4]);
|
|
2151 |
tty->print_cr(" r5 = 0x%016lx", regs[5]);
|
|
2152 |
tty->print_cr(" r6 = 0x%016lx", regs[6]);
|
|
2153 |
tty->print_cr(" r7 = 0x%016lx", regs[7]);
|
|
2154 |
tty->print_cr(" r8 = 0x%016lx", regs[8]);
|
|
2155 |
tty->print_cr(" r9 = 0x%016lx", regs[9]);
|
|
2156 |
tty->print_cr("r10 = 0x%016lx", regs[10]);
|
|
2157 |
tty->print_cr("r11 = 0x%016lx", regs[11]);
|
|
2158 |
tty->print_cr("r12 = 0x%016lx", regs[12]);
|
|
2159 |
tty->print_cr("r13 = 0x%016lx", regs[13]);
|
|
2160 |
tty->print_cr("r14 = 0x%016lx", regs[14]);
|
|
2161 |
tty->print_cr("r15 = 0x%016lx", regs[15]);
|
|
2162 |
tty->print_cr("r16 = 0x%016lx", regs[16]);
|
|
2163 |
tty->print_cr("r17 = 0x%016lx", regs[17]);
|
|
2164 |
tty->print_cr("r18 = 0x%016lx", regs[18]);
|
|
2165 |
tty->print_cr("r19 = 0x%016lx", regs[19]);
|
|
2166 |
tty->print_cr("r20 = 0x%016lx", regs[20]);
|
|
2167 |
tty->print_cr("r21 = 0x%016lx", regs[21]);
|
|
2168 |
tty->print_cr("r22 = 0x%016lx", regs[22]);
|
|
2169 |
tty->print_cr("r23 = 0x%016lx", regs[23]);
|
|
2170 |
tty->print_cr("r24 = 0x%016lx", regs[24]);
|
|
2171 |
tty->print_cr("r25 = 0x%016lx", regs[25]);
|
|
2172 |
tty->print_cr("r26 = 0x%016lx", regs[26]);
|
|
2173 |
tty->print_cr("r27 = 0x%016lx", regs[27]);
|
|
2174 |
tty->print_cr("r28 = 0x%016lx", regs[28]);
|
|
2175 |
tty->print_cr("r30 = 0x%016lx", regs[30]);
|
|
2176 |
tty->print_cr("r31 = 0x%016lx", regs[31]);
|
|
2177 |
BREAKPOINT;
|
|
2178 |
}
|
|
2179 |
ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
|
|
2180 |
} else {
|
|
2181 |
ttyLocker ttyl;
|
|
2182 |
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
|
|
2183 |
msg);
|
|
2184 |
assert(false, err_msg("DEBUG MESSAGE: %s", msg));
|
|
2185 |
}
|
|
2186 |
}
|
|
2187 |
|
|
2188 |
#ifdef BUILTIN_SIM
|
|
2189 |
// routine to generate an x86 prolog for a stub function which
|
|
2190 |
// bootstraps into the generated ARM code which directly follows the
|
|
2191 |
// stub
|
|
2192 |
//
|
|
2193 |
// the argument encodes the number of general and fp registers
|
|
2194 |
// passed by the caller and the calling convention (currently just
|
|
2195 |
// the number of general registers and assumes C argument passing)
|
|
2196 |
|
|
2197 |
extern "C" {
|
|
2198 |
int aarch64_stub_prolog_size();
|
|
2199 |
void aarch64_stub_prolog();
|
|
2200 |
void aarch64_prolog();
|
|
2201 |
}
|
|
2202 |
|
|
2203 |
void MacroAssembler::c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type,
|
|
2204 |
address *prolog_ptr)
|
|
2205 |
{
|
|
2206 |
int calltype = (((ret_type & 0x3) << 8) |
|
|
2207 |
((fp_arg_count & 0xf) << 4) |
|
|
2208 |
(gp_arg_count & 0xf));
|
|
2209 |
|
|
2210 |
// the addresses for the x86 to ARM entry code we need to use
|
|
2211 |
address start = pc();
|
|
2212 |
// printf("start = %lx\n", start);
|
|
2213 |
int byteCount = aarch64_stub_prolog_size();
|
|
2214 |
// printf("byteCount = %x\n", byteCount);
|
|
2215 |
int instructionCount = (byteCount + 3)/ 4;
|
|
2216 |
// printf("instructionCount = %x\n", instructionCount);
|
|
2217 |
for (int i = 0; i < instructionCount; i++) {
|
|
2218 |
nop();
|
|
2219 |
}
|
|
2220 |
|
|
2221 |
memcpy(start, (void*)aarch64_stub_prolog, byteCount);
|
|
2222 |
|
|
2223 |
// write the address of the setup routine and the call format at the
|
|
2224 |
// end of the copied code
|
|
2225 |
u_int64_t *patch_end = (u_int64_t *)(start + byteCount);
|
|
2226 |
if (prolog_ptr)
|
|
2227 |
patch_end[-2] = (u_int64_t)prolog_ptr;
|
|
2228 |
patch_end[-1] = calltype;
|
|
2229 |
}
|
|
2230 |
#endif
|
|
2231 |
|
|
2232 |
void MacroAssembler::push_CPU_state() {
|
|
2233 |
push(0x3fffffff, sp); // integer registers except lr & sp
|
|
2234 |
|
|
2235 |
for (int i = 30; i >= 0; i -= 2)
|
|
2236 |
stpd(as_FloatRegister(i), as_FloatRegister(i+1),
|
|
2237 |
Address(pre(sp, -2 * wordSize)));
|
|
2238 |
}
|
|
2239 |
|
|
2240 |
void MacroAssembler::pop_CPU_state() {
|
|
2241 |
for (int i = 0; i < 32; i += 2)
|
|
2242 |
ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
|
|
2243 |
Address(post(sp, 2 * wordSize)));
|
|
2244 |
|
|
2245 |
pop(0x3fffffff, sp); // integer registers except lr & sp
|
|
2246 |
}
|
|
2247 |
|
|
2248 |
/**
|
|
2249 |
* Emits code to update CRC-32 with a byte value according to constants in table
|
|
2250 |
*
|
|
2251 |
* @param [in,out]crc Register containing the crc.
|
|
2252 |
* @param [in]val Register containing the byte to fold into the CRC.
|
|
2253 |
* @param [in]table Register containing the table of crc constants.
|
|
2254 |
*
|
|
2255 |
* uint32_t crc;
|
|
2256 |
* val = crc_table[(val ^ crc) & 0xFF];
|
|
2257 |
* crc = val ^ (crc >> 8);
|
|
2258 |
*
|
|
2259 |
*/
|
|
2260 |
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
|
|
2261 |
eor(val, val, crc);
|
|
2262 |
andr(val, val, 0xff);
|
|
2263 |
ldrw(val, Address(table, val, Address::lsl(2)));
|
|
2264 |
eor(crc, val, crc, Assembler::LSR, 8);
|
|
2265 |
}
|
|
2266 |
|
|
2267 |
/**
|
|
2268 |
* Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
|
|
2269 |
*
|
|
2270 |
* @param [in,out]crc Register containing the crc.
|
|
2271 |
* @param [in]v Register containing the 32-bit to fold into the CRC.
|
|
2272 |
* @param [in]table0 Register containing table 0 of crc constants.
|
|
2273 |
* @param [in]table1 Register containing table 1 of crc constants.
|
|
2274 |
* @param [in]table2 Register containing table 2 of crc constants.
|
|
2275 |
* @param [in]table3 Register containing table 3 of crc constants.
|
|
2276 |
*
|
|
2277 |
* uint32_t crc;
|
|
2278 |
* v = crc ^ v
|
|
2279 |
* crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
|
|
2280 |
*
|
|
2281 |
*/
|
|
2282 |
void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
|
|
2283 |
Register table0, Register table1, Register table2, Register table3,
|
|
2284 |
bool upper) {
|
|
2285 |
eor(v, crc, v, upper ? LSR:LSL, upper ? 32:0);
|
|
2286 |
uxtb(tmp, v);
|
|
2287 |
ldrw(crc, Address(table3, tmp, Address::lsl(2)));
|
|
2288 |
ubfx(tmp, v, 8, 8);
|
|
2289 |
ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
|
|
2290 |
eor(crc, crc, tmp);
|
|
2291 |
ubfx(tmp, v, 16, 8);
|
|
2292 |
ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
|
|
2293 |
eor(crc, crc, tmp);
|
|
2294 |
ubfx(tmp, v, 24, 8);
|
|
2295 |
ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
|
|
2296 |
eor(crc, crc, tmp);
|
|
2297 |
}
|
|
2298 |
|
|
2299 |
/**
|
|
2300 |
* @param crc register containing existing CRC (32-bit)
|
|
2301 |
* @param buf register pointing to input byte buffer (byte*)
|
|
2302 |
* @param len register containing number of bytes
|
|
2303 |
* @param table register that will contain address of CRC table
|
|
2304 |
* @param tmp scratch register
|
|
2305 |
*/
|
|
2306 |
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len,
|
|
2307 |
Register table0, Register table1, Register table2, Register table3,
|
|
2308 |
Register tmp, Register tmp2, Register tmp3) {
|
|
2309 |
Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
|
|
2310 |
unsigned long offset;
|
|
2311 |
|
|
2312 |
ornw(crc, zr, crc);
|
|
2313 |
|
|
2314 |
if (UseCRC32) {
|
|
2315 |
Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop;
|
|
2316 |
|
|
2317 |
subs(len, len, 64);
|
|
2318 |
br(Assembler::GE, CRC_by64_loop);
|
|
2319 |
adds(len, len, 64-4);
|
|
2320 |
br(Assembler::GE, CRC_by4_loop);
|
|
2321 |
adds(len, len, 4);
|
|
2322 |
br(Assembler::GT, CRC_by1_loop);
|
|
2323 |
b(L_exit);
|
|
2324 |
|
|
2325 |
BIND(CRC_by4_loop);
|
|
2326 |
ldrw(tmp, Address(post(buf, 4)));
|
|
2327 |
subs(len, len, 4);
|
|
2328 |
crc32w(crc, crc, tmp);
|
|
2329 |
br(Assembler::GE, CRC_by4_loop);
|
|
2330 |
adds(len, len, 4);
|
|
2331 |
br(Assembler::LE, L_exit);
|
|
2332 |
BIND(CRC_by1_loop);
|
|
2333 |
ldrb(tmp, Address(post(buf, 1)));
|
|
2334 |
subs(len, len, 1);
|
|
2335 |
crc32b(crc, crc, tmp);
|
|
2336 |
br(Assembler::GT, CRC_by1_loop);
|
|
2337 |
b(L_exit);
|
|
2338 |
|
|
2339 |
align(CodeEntryAlignment);
|
|
2340 |
BIND(CRC_by64_loop);
|
|
2341 |
subs(len, len, 64);
|
|
2342 |
ldp(tmp, tmp3, Address(post(buf, 16)));
|
|
2343 |
crc32x(crc, crc, tmp);
|
|
2344 |
crc32x(crc, crc, tmp3);
|
|
2345 |
ldp(tmp, tmp3, Address(post(buf, 16)));
|
|
2346 |
crc32x(crc, crc, tmp);
|
|
2347 |
crc32x(crc, crc, tmp3);
|
|
2348 |
ldp(tmp, tmp3, Address(post(buf, 16)));
|
|
2349 |
crc32x(crc, crc, tmp);
|
|
2350 |
crc32x(crc, crc, tmp3);
|
|
2351 |
ldp(tmp, tmp3, Address(post(buf, 16)));
|
|
2352 |
crc32x(crc, crc, tmp);
|
|
2353 |
crc32x(crc, crc, tmp3);
|
|
2354 |
br(Assembler::GE, CRC_by64_loop);
|
|
2355 |
adds(len, len, 64-4);
|
|
2356 |
br(Assembler::GE, CRC_by4_loop);
|
|
2357 |
adds(len, len, 4);
|
|
2358 |
br(Assembler::GT, CRC_by1_loop);
|
|
2359 |
BIND(L_exit);
|
|
2360 |
ornw(crc, zr, crc);
|
|
2361 |
return;
|
|
2362 |
}
|
|
2363 |
|
|
2364 |
adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset);
|
|
2365 |
if (offset) add(table0, table0, offset);
|
|
2366 |
add(table1, table0, 1*256*sizeof(juint));
|
|
2367 |
add(table2, table0, 2*256*sizeof(juint));
|
|
2368 |
add(table3, table0, 3*256*sizeof(juint));
|
|
2369 |
|
|
2370 |
if (UseNeon) {
|
|
2371 |
cmp(len, 64);
|
|
2372 |
br(Assembler::LT, L_by16);
|
|
2373 |
eor(v16, T16B, v16, v16);
|
|
2374 |
|
|
2375 |
Label L_fold;
|
|
2376 |
|
|
2377 |
add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants
|
|
2378 |
|
|
2379 |
ld1(v0, v1, T2D, post(buf, 32));
|
|
2380 |
ld1r(v4, T2D, post(tmp, 8));
|
|
2381 |
ld1r(v5, T2D, post(tmp, 8));
|
|
2382 |
ld1r(v6, T2D, post(tmp, 8));
|
|
2383 |
ld1r(v7, T2D, post(tmp, 8));
|
|
2384 |
mov(v16, T4S, 0, crc);
|
|
2385 |
|
|
2386 |
eor(v0, T16B, v0, v16);
|
|
2387 |
sub(len, len, 64);
|
|
2388 |
|
|
2389 |
BIND(L_fold);
|
|
2390 |
pmull(v22, T8H, v0, v5, T8B);
|
|
2391 |
pmull(v20, T8H, v0, v7, T8B);
|
|
2392 |
pmull(v23, T8H, v0, v4, T8B);
|
|
2393 |
pmull(v21, T8H, v0, v6, T8B);
|
|
2394 |
|
|
2395 |
pmull2(v18, T8H, v0, v5, T16B);
|
|
2396 |
pmull2(v16, T8H, v0, v7, T16B);
|
|
2397 |
pmull2(v19, T8H, v0, v4, T16B);
|
|
2398 |
pmull2(v17, T8H, v0, v6, T16B);
|
|
2399 |
|
|
2400 |
uzp1(v24, v20, v22, T8H);
|
|
2401 |
uzp2(v25, v20, v22, T8H);
|
|
2402 |
eor(v20, T16B, v24, v25);
|
|
2403 |
|
|
2404 |
uzp1(v26, v16, v18, T8H);
|
|
2405 |
uzp2(v27, v16, v18, T8H);
|
|
2406 |
eor(v16, T16B, v26, v27);
|
|
2407 |
|
|
2408 |
ushll2(v22, T4S, v20, T8H, 8);
|
|
2409 |
ushll(v20, T4S, v20, T4H, 8);
|
|
2410 |
|
|
2411 |
ushll2(v18, T4S, v16, T8H, 8);
|
|
2412 |
ushll(v16, T4S, v16, T4H, 8);
|
|
2413 |
|
|
2414 |
eor(v22, T16B, v23, v22);
|
|
2415 |
eor(v18, T16B, v19, v18);
|
|
2416 |
eor(v20, T16B, v21, v20);
|
|
2417 |
eor(v16, T16B, v17, v16);
|
|
2418 |
|
|
2419 |
uzp1(v17, v16, v20, T2D);
|
|
2420 |
uzp2(v21, v16, v20, T2D);
|
|
2421 |
eor(v17, T16B, v17, v21);
|
|
2422 |
|
|
2423 |
ushll2(v20, T2D, v17, T4S, 16);
|
|
2424 |
ushll(v16, T2D, v17, T2S, 16);
|
|
2425 |
|
|
2426 |
eor(v20, T16B, v20, v22);
|
|
2427 |
eor(v16, T16B, v16, v18);
|
|
2428 |
|
|
2429 |
uzp1(v17, v20, v16, T2D);
|
|
2430 |
uzp2(v21, v20, v16, T2D);
|
|
2431 |
eor(v28, T16B, v17, v21);
|
|
2432 |
|
|
2433 |
pmull(v22, T8H, v1, v5, T8B);
|
|
2434 |
pmull(v20, T8H, v1, v7, T8B);
|
|
2435 |
pmull(v23, T8H, v1, v4, T8B);
|
|
2436 |
pmull(v21, T8H, v1, v6, T8B);
|
|
2437 |
|
|
2438 |
pmull2(v18, T8H, v1, v5, T16B);
|
|
2439 |
pmull2(v16, T8H, v1, v7, T16B);
|
|
2440 |
pmull2(v19, T8H, v1, v4, T16B);
|
|
2441 |
pmull2(v17, T8H, v1, v6, T16B);
|
|
2442 |
|
|
2443 |
ld1(v0, v1, T2D, post(buf, 32));
|
|
2444 |
|
|
2445 |
uzp1(v24, v20, v22, T8H);
|
|
2446 |
uzp2(v25, v20, v22, T8H);
|
|
2447 |
eor(v20, T16B, v24, v25);
|
|
2448 |
|
|
2449 |
uzp1(v26, v16, v18, T8H);
|
|
2450 |
uzp2(v27, v16, v18, T8H);
|
|
2451 |
eor(v16, T16B, v26, v27);
|
|
2452 |
|
|
2453 |
ushll2(v22, T4S, v20, T8H, 8);
|
|
2454 |
ushll(v20, T4S, v20, T4H, 8);
|
|
2455 |
|
|
2456 |
ushll2(v18, T4S, v16, T8H, 8);
|
|
2457 |
ushll(v16, T4S, v16, T4H, 8);
|
|
2458 |
|
|
2459 |
eor(v22, T16B, v23, v22);
|
|
2460 |
eor(v18, T16B, v19, v18);
|
|
2461 |
eor(v20, T16B, v21, v20);
|
|
2462 |
eor(v16, T16B, v17, v16);
|
|
2463 |
|
|
2464 |
uzp1(v17, v16, v20, T2D);
|
|
2465 |
uzp2(v21, v16, v20, T2D);
|
|
2466 |
eor(v16, T16B, v17, v21);
|
|
2467 |
|
|
2468 |
ushll2(v20, T2D, v16, T4S, 16);
|
|
2469 |
ushll(v16, T2D, v16, T2S, 16);
|
|
2470 |
|
|
2471 |
eor(v20, T16B, v22, v20);
|
|
2472 |
eor(v16, T16B, v16, v18);
|
|
2473 |
|
|
2474 |
uzp1(v17, v20, v16, T2D);
|
|
2475 |
uzp2(v21, v20, v16, T2D);
|
|
2476 |
eor(v20, T16B, v17, v21);
|
|
2477 |
|
|
2478 |
shl(v16, v28, T2D, 1);
|
|
2479 |
shl(v17, v20, T2D, 1);
|
|
2480 |
|
|
2481 |
eor(v0, T16B, v0, v16);
|
|
2482 |
eor(v1, T16B, v1, v17);
|
|
2483 |
|
|
2484 |
subs(len, len, 32);
|
|
2485 |
br(Assembler::GE, L_fold);
|
|
2486 |
|
|
2487 |
mov(crc, 0);
|
|
2488 |
mov(tmp, v0, T1D, 0);
|
|
2489 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
|
|
2490 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
|
|
2491 |
mov(tmp, v0, T1D, 1);
|
|
2492 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
|
|
2493 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
|
|
2494 |
mov(tmp, v1, T1D, 0);
|
|
2495 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
|
|
2496 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
|
|
2497 |
mov(tmp, v1, T1D, 1);
|
|
2498 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
|
|
2499 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
|
|
2500 |
|
|
2501 |
add(len, len, 32);
|
|
2502 |
}
|
|
2503 |
|
|
2504 |
BIND(L_by16);
|
|
2505 |
subs(len, len, 16);
|
|
2506 |
br(Assembler::GE, L_by16_loop);
|
|
2507 |
adds(len, len, 16-4);
|
|
2508 |
br(Assembler::GE, L_by4_loop);
|
|
2509 |
adds(len, len, 4);
|
|
2510 |
br(Assembler::GT, L_by1_loop);
|
|
2511 |
b(L_exit);
|
|
2512 |
|
|
2513 |
BIND(L_by4_loop);
|
|
2514 |
ldrw(tmp, Address(post(buf, 4)));
|
|
2515 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3);
|
|
2516 |
subs(len, len, 4);
|
|
2517 |
br(Assembler::GE, L_by4_loop);
|
|
2518 |
adds(len, len, 4);
|
|
2519 |
br(Assembler::LE, L_exit);
|
|
2520 |
BIND(L_by1_loop);
|
|
2521 |
subs(len, len, 1);
|
|
2522 |
ldrb(tmp, Address(post(buf, 1)));
|
|
2523 |
update_byte_crc32(crc, tmp, table0);
|
|
2524 |
br(Assembler::GT, L_by1_loop);
|
|
2525 |
b(L_exit);
|
|
2526 |
|
|
2527 |
align(CodeEntryAlignment);
|
|
2528 |
BIND(L_by16_loop);
|
|
2529 |
subs(len, len, 16);
|
|
2530 |
ldp(tmp, tmp3, Address(post(buf, 16)));
|
|
2531 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false);
|
|
2532 |
update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true);
|
|
2533 |
update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false);
|
|
2534 |
update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true);
|
|
2535 |
br(Assembler::GE, L_by16_loop);
|
|
2536 |
adds(len, len, 16-4);
|
|
2537 |
br(Assembler::GE, L_by4_loop);
|
|
2538 |
adds(len, len, 4);
|
|
2539 |
br(Assembler::GT, L_by1_loop);
|
|
2540 |
BIND(L_exit);
|
|
2541 |
ornw(crc, zr, crc);
|
|
2542 |
}
|
|
2543 |
|
|
2544 |
SkipIfEqual::SkipIfEqual(
|
|
2545 |
MacroAssembler* masm, const bool* flag_addr, bool value) {
|
|
2546 |
_masm = masm;
|
|
2547 |
unsigned long offset;
|
|
2548 |
_masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
|
|
2549 |
_masm->ldrb(rscratch1, Address(rscratch1, offset));
|
|
2550 |
_masm->cbzw(rscratch1, _label);
|
|
2551 |
}
|
|
2552 |
|
|
2553 |
SkipIfEqual::~SkipIfEqual() {
|
|
2554 |
_masm->bind(_label);
|
|
2555 |
}
|
|
2556 |
|
|
2557 |
void MacroAssembler::cmpptr(Register src1, Address src2) {
|
|
2558 |
unsigned long offset;
|
|
2559 |
adrp(rscratch1, src2, offset);
|
|
2560 |
ldr(rscratch1, Address(rscratch1, offset));
|
|
2561 |
cmp(src1, rscratch1);
|
|
2562 |
}
|
|
2563 |
|
|
2564 |
void MacroAssembler::store_check(Register obj) {
|
|
2565 |
// Does a store check for the oop in register obj. The content of
|
|
2566 |
// register obj is destroyed afterwards.
|
|
2567 |
store_check_part_1(obj);
|
|
2568 |
store_check_part_2(obj);
|
|
2569 |
}
|
|
2570 |
|
|
2571 |
void MacroAssembler::store_check(Register obj, Address dst) {
|
|
2572 |
store_check(obj);
|
|
2573 |
}
|
|
2574 |
|
|
2575 |
|
|
2576 |
// split the store check operation so that other instructions can be scheduled in between
|
|
2577 |
void MacroAssembler::store_check_part_1(Register obj) {
|
|
2578 |
BarrierSet* bs = Universe::heap()->barrier_set();
|
|
2579 |
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
|
|
2580 |
lsr(obj, obj, CardTableModRefBS::card_shift);
|
|
2581 |
}
|
|
2582 |
|
|
2583 |
void MacroAssembler::store_check_part_2(Register obj) {
|
|
2584 |
BarrierSet* bs = Universe::heap()->barrier_set();
|
|
2585 |
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
|
|
2586 |
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
|
|
2587 |
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
|
|
2588 |
|
|
2589 |
// The calculation for byte_map_base is as follows:
|
|
2590 |
// byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
|
|
2591 |
// So this essentially converts an address to a displacement and
|
|
2592 |
// it will never need to be relocated.
|
|
2593 |
|
|
2594 |
// FIXME: It's not likely that disp will fit into an offset so we
|
|
2595 |
// don't bother to check, but it could save an instruction.
|
|
2596 |
intptr_t disp = (intptr_t) ct->byte_map_base;
|
|
2597 |
mov(rscratch1, disp);
|
|
2598 |
strb(zr, Address(obj, rscratch1));
|
|
2599 |
}
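// The net effect of the two parts is, in pseudo-code,
//   byte_map_base[obj_address >> card_shift] = 0;
// i.e. the card covering the stored-into object is marked dirty by
// writing a zero byte.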
|
|
2600 |
|
|
2601 |
void MacroAssembler::load_klass(Register dst, Register src) {
|
|
2602 |
if (UseCompressedClassPointers) {
|
|
2603 |
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
|
|
2604 |
decode_klass_not_null(dst);
|
|
2605 |
} else {
|
|
2606 |
ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
|
|
2607 |
}
|
|
2608 |
}
|
|
2609 |
|
|
2610 |
void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
|
|
2611 |
if (UseCompressedClassPointers) {
|
|
2612 |
ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
|
|
2613 |
if (Universe::narrow_klass_base() == NULL) {
|
|
2614 |
cmp(trial_klass, tmp, LSL, Universe::narrow_klass_shift());
|
|
2615 |
return;
|
|
2616 |
} else if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
|
|
2617 |
&& Universe::narrow_klass_shift() == 0) {
|
|
2618 |
// Only the bottom 32 bits matter
|
|
2619 |
cmpw(trial_klass, tmp);
|
|
2620 |
return;
|
|
2621 |
}
|
|
2622 |
decode_klass_not_null(tmp);
|
|
2623 |
} else {
|
|
2624 |
ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
|
|
2625 |
}
|
|
2626 |
cmp(trial_klass, tmp);
|
|
2627 |
}
|
|
2628 |
|
|
2629 |
void MacroAssembler::load_prototype_header(Register dst, Register src) {
|
|
2630 |
load_klass(dst, src);
|
|
2631 |
ldr(dst, Address(dst, Klass::prototype_header_offset()));
|
|
2632 |
}
|
|
2633 |
|
|
2634 |
void MacroAssembler::store_klass(Register dst, Register src) {
|
|
2635 |
// FIXME: Should this be a store release? Concurrent GCs assume
|
|
2636 |
// klass length is valid if klass field is not null.
|
|
2637 |
if (UseCompressedClassPointers) {
|
|
2638 |
encode_klass_not_null(src);
|
|
2639 |
strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
|
|
2640 |
} else {
|
|
2641 |
str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
|
|
2642 |
}
|
|
2643 |
}
|
|
2644 |
|
|
2645 |
void MacroAssembler::store_klass_gap(Register dst, Register src) {
|
|
2646 |
if (UseCompressedClassPointers) {
|
|
2647 |
// Store to klass gap in destination
|
|
2648 |
strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
|
|
2649 |
}
|
|
2650 |
}
|
|
2651 |
|
|
2652 |
// Algorithm must match oop.inline.hpp encode_heap_oop.
|
|
2653 |
void MacroAssembler::encode_heap_oop(Register d, Register s) {
|
|
2654 |
#ifdef ASSERT
|
|
2655 |
verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
|
|
2656 |
#endif
|
|
2657 |
verify_oop(s, "broken oop in encode_heap_oop");
|
|
2658 |
if (Universe::narrow_oop_base() == NULL) {
|
|
2659 |
if (Universe::narrow_oop_shift() != 0) {
|
|
2660 |
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
|
|
2661 |
lsr(d, s, LogMinObjAlignmentInBytes);
|
|
2662 |
} else {
|
|
2663 |
mov(d, s);
|
|
2664 |
}
|
|
2665 |
} else {
|
|
2666 |
subs(d, s, rheapbase);
|
|
2667 |
csel(d, d, zr, Assembler::HS);
|
|
2668 |
lsr(d, d, LogMinObjAlignmentInBytes);
|
|
2669 |
|
|
2670 |
/* Old algorithm: is this any worse?
|
|
2671 |
Label nonnull;
|
|
2672 |
cbnz(r, nonnull);
|
|
2673 |
sub(r, r, rheapbase);
|
|
2674 |
bind(nonnull);
|
|
2675 |
lsr(r, r, LogMinObjAlignmentInBytes);
|
|
2676 |
*/
|
|
2677 |
}
|
|
2678 |
}
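// With a non-NULL heap base the encoding above amounts to
//   narrow = (oop == NULL) ? 0 : (oop - heap_base) >> LogMinObjAlignmentInBytes
// the CSEL supplying the 0 for a NULL input instead of the explicit branch
// used by the old algorithm.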
|
|
2679 |
|
|
2680 |
void MacroAssembler::encode_heap_oop_not_null(Register r) {
|
|
2681 |
#ifdef ASSERT
|
|
2682 |
verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
|
|
2683 |
if (CheckCompressedOops) {
|
|
2684 |
Label ok;
|
|
2685 |
cbnz(r, ok);
|
|
2686 |
stop("null oop passed to encode_heap_oop_not_null");
|
|
2687 |
bind(ok);
|
|
2688 |
}
|
|
2689 |
#endif
|
|
2690 |
verify_oop(r, "broken oop in encode_heap_oop_not_null");
|
|
2691 |
if (Universe::narrow_oop_base() != NULL) {
|
|
2692 |
sub(r, r, rheapbase);
|
|
2693 |
}
|
|
2694 |
if (Universe::narrow_oop_shift() != 0) {
|
|
2695 |
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
|
|
2696 |
lsr(r, r, LogMinObjAlignmentInBytes);
|
|
2697 |
}
|
|
2698 |
}
|
|
2699 |
|
|
2700 |
void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
|
|
2701 |
#ifdef ASSERT
|
|
2702 |
verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
|
|
2703 |
if (CheckCompressedOops) {
|
|
2704 |
Label ok;
|
|
2705 |
cbnz(src, ok);
|
|
2706 |
stop("null oop passed to encode_heap_oop_not_null2");
|
|
2707 |
bind(ok);
|
|
2708 |
}
|
|
2709 |
#endif
|
|
2710 |
verify_oop(src, "broken oop in encode_heap_oop_not_null2");
|
|
2711 |
|
|
2712 |
Register data = src;
|
|
2713 |
if (Universe::narrow_oop_base() != NULL) {
|
|
2714 |
sub(dst, src, rheapbase);
|
|
2715 |
data = dst;
|
|
2716 |
}
|
|
2717 |
if (Universe::narrow_oop_shift() != 0) {
|
|
2718 |
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
|
|
2719 |
lsr(dst, data, LogMinObjAlignmentInBytes);
|
|
2720 |
data = dst;
|
|
2721 |
}
|
|
2722 |
if (data == src)
|
|
2723 |
mov(dst, src);
|
|
2724 |
}
|
|
2725 |
|
|
2726 |
void MacroAssembler::decode_heap_oop(Register d, Register s) {
|
|
2727 |
#ifdef ASSERT
|
|
2728 |
verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
|
|
2729 |
#endif
|
|
2730 |
if (Universe::narrow_oop_base() == NULL) {
|
|
2731 |
if (Universe::narrow_oop_shift() != 0 || d != s) {
|
|
2732 |
lsl(d, s, Universe::narrow_oop_shift());
|
|
2733 |
}
|
|
2734 |
} else {
|
|
2735 |
Label done;
|
|
2736 |
if (d != s)
|
|
2737 |
mov(d, s);
|
|
2738 |
cbz(s, done);
|
|
2739 |
add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes);
|
|
2740 |
bind(done);
|
|
2741 |
}
|
|
2742 |
verify_oop(d, "broken oop in decode_heap_oop");
|
|
2743 |
}
|
|
2744 |
|
|
2745 |
void MacroAssembler::decode_heap_oop_not_null(Register r) {
|
|
2746 |
assert (UseCompressedOops, "should only be used for compressed headers");
|
|
2747 |
assert (Universe::heap() != NULL, "java heap should be initialized");
|
|
2748 |
// Cannot assert, unverified entry point counts instructions (see .ad file)
|
|
2749 |
// vtableStubs also counts instructions in pd_code_size_limit.
|
|
2750 |
// Also do not verify_oop as this is called by verify_oop.
|
|
2751 |
if (Universe::narrow_oop_shift() != 0) {
|
|
2752 |
assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
|
|
2753 |
if (Universe::narrow_oop_base() != NULL) {
|
|
2754 |
add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes);
|
|
2755 |
} else {
|
|
2756 |
add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes);
|
|
2757 |
}
|
|
2758 |
} else {
|
|
2759 |
assert (Universe::narrow_oop_base() == NULL, "sanity");
|
|
2760 |
}
|
|
2761 |
}
|
|
2762 |
|
|
2763 |
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
|
|
2764 |
assert (UseCompressedOops, "should only be used for compressed headers");
|
|
2765 |
assert (Universe::heap() != NULL, "java heap should be initialized");
|
|
2766 |
// Cannot assert, unverified entry point counts instructions (see .ad file)
|
|
2767 |
// vtableStubs also counts instructions in pd_code_size_limit.
|
|
2768 |
// Also do not verify_oop as this is called by verify_oop.
|
|
2769 |
if (Universe::narrow_oop_shift() != 0) {
|
|
2770 |
assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
|
|
2771 |
if (Universe::narrow_oop_base() != NULL) {
|
|
2772 |
add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes);
|
|
2773 |
} else {
|
|
2774 |
add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes);
|
|
2775 |
}
|
|
2776 |
} else {
|
|
2777 |
assert (Universe::narrow_oop_base() == NULL, "sanity");
|
|
2778 |
if (dst != src) {
|
|
2779 |
mov(dst, src);
|
|
2780 |
}
|
|
2781 |
}
|
|
2782 |
}
|
|
2783 |
|
|
2784 |
void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
|
|
2785 |
if (Universe::narrow_klass_base() == NULL) {
|
|
2786 |
if (Universe::narrow_klass_shift() != 0) {
|
|
2787 |
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
|
|
2788 |
lsr(dst, src, LogKlassAlignmentInBytes);
|
|
2789 |
} else {
|
|
2790 |
if (dst != src) mov(dst, src);
|
|
2791 |
}
|
|
2792 |
return;
|
|
2793 |
}
|
|
2794 |
|
|
2795 |
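  // The base can be stripped with a single EOR (instead of a SUB) when it has no
  // bits in common with any klass offset inside the class space.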
if (use_XOR_for_compressed_class_base) {
|
|
2796 |
if (Universe::narrow_klass_shift() != 0) {
|
|
2797 |
eor(dst, src, (uint64_t)Universe::narrow_klass_base());
|
|
2798 |
lsr(dst, dst, LogKlassAlignmentInBytes);
|
|
2799 |
} else {
|
|
2800 |
eor(dst, src, (uint64_t)Universe::narrow_klass_base());
|
|
2801 |
}
|
|
2802 |
return;
|
|
2803 |
}
|
|
2804 |
|
|
2805 |
if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
|
|
2806 |
&& Universe::narrow_klass_shift() == 0) {
|
|
2807 |
movw(dst, src);
|
|
2808 |
return;
|
|
2809 |
}
|
|
2810 |
|
|
2811 |
#ifdef ASSERT
|
|
2812 |
verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
|
|
2813 |
#endif
|
|
2814 |
|
|
2815 |
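  // General case: materialize the klass base in a temporary (reusing rheapbase
  // when dst aliases src) and subtract it.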
Register rbase = dst;
|
|
2816 |
if (dst == src) rbase = rheapbase;
|
|
2817 |
mov(rbase, (uint64_t)Universe::narrow_klass_base());
|
|
2818 |
sub(dst, src, rbase);
|
|
2819 |
if (Universe::narrow_klass_shift() != 0) {
|
|
2820 |
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
|
|
2821 |
lsr(dst, dst, LogKlassAlignmentInBytes);
|
|
2822 |
}
|
|
2823 |
if (dst == src) reinit_heapbase();
|
|
2824 |
}
|
|
2825 |
|
|
2826 |
void MacroAssembler::encode_klass_not_null(Register r) {
|
|
2827 |
encode_klass_not_null(r, r);
|
|
2828 |
}
|
|
2829 |
|
|
2830 |
void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
|
|
2831 |
Register rbase = dst;
|
|
2832 |
assert (UseCompressedClassPointers, "should only be used for compressed headers");
|
|
2833 |
|
|
2834 |
if (Universe::narrow_klass_base() == NULL) {
|
|
2835 |
if (Universe::narrow_klass_shift() != 0) {
|
|
2836 |
assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
|
|
2837 |
lsl(dst, src, LogKlassAlignmentInBytes);
|
|
2838 |
} else {
|
|
2839 |
if (dst != src) mov(dst, src);
|
|
2840 |
}
|
|
2841 |
return;
|
|
2842 |
}
|
|
2843 |
|
|
2844 |
if (use_XOR_for_compressed_class_base) {
|
|
2845 |
if (Universe::narrow_klass_shift() != 0) {
|
|
2846 |
lsl(dst, src, LogKlassAlignmentInBytes);
|
|
2847 |
eor(dst, dst, (uint64_t)Universe::narrow_klass_base());
|
|
2848 |
} else {
|
|
2849 |
eor(dst, src, (uint64_t)Universe::narrow_klass_base());
|
|
2850 |
}
|
|
2851 |
return;
|
|
2852 |
}
|
|
2853 |
|
|
2854 |
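  // If the base has zero low 32 bits and there is no shift, decoding is just the
  // 32-bit narrow value with the high half of the base inserted by MOVK.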
if (((uint64_t)Universe::narrow_klass_base() & 0xffffffff) == 0
|
|
2855 |
&& Universe::narrow_klass_shift() == 0) {
|
|
2856 |
if (dst != src)
|
|
2857 |
movw(dst, src);
|
|
2858 |
movk(dst, (uint64_t)Universe::narrow_klass_base() >> 32, 32);
|
|
2859 |
return;
|
|
2860 |
}
|
|
2861 |
|
|
2862 |
// Cannot assert, unverified entry point counts instructions (see .ad file)
|
|
2863 |
// vtableStubs also counts instructions in pd_code_size_limit.
|
|
2864 |
// Also do not verify_oop as this is called by verify_oop.
|
|
2865 |
if (dst == src) rbase = rheapbase;
|
|
2866 |
mov(rbase, (uint64_t)Universe::narrow_klass_base());
|
|
2867 |
if (Universe::narrow_klass_shift() != 0) {
|
|
2868 |
assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
|
|
2869 |
add(dst, rbase, src, Assembler::LSL, LogKlassAlignmentInBytes);
|
|
2870 |
} else {
|
|
2871 |
add(dst, rbase, src);
|
|
2872 |
}
|
|
2873 |
if (dst == src) reinit_heapbase();
|
|
2874 |
}
|
|
2875 |
|
|
2876 |
void MacroAssembler::decode_klass_not_null(Register r) {
|
|
2877 |
decode_klass_not_null(r, r);
|
|
2878 |
}
|
|
2879 |
|
|
2880 |
void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
|
|
2881 |
assert (UseCompressedOops, "should only be used for compressed oops");
|
|
2882 |
assert (Universe::heap() != NULL, "java heap should be initialized");
|
|
2883 |
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
|
|
2884 |
|
|
2885 |
int oop_index = oop_recorder()->find_index(obj);
|
|
2886 |
assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
|
|
2887 |
|
|
2888 |
InstructionMark im(this);
|
|
2889 |
RelocationHolder rspec = oop_Relocation::spec(oop_index);
|
|
2890 |
code_section()->relocate(inst_mark(), rspec);
|
|
2891 |
movz(dst, 0xDEAD, 16);
|
|
2892 |
movk(dst, 0xBEEF);
|
|
2893 |
}
|
|
2894 |
|
|
2895 |
void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
|
|
2896 |
assert (UseCompressedClassPointers, "should only be used for compressed headers");
|
|
2897 |
assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
|
|
2898 |
int index = oop_recorder()->find_index(k);
|
|
2899 |
assert(! Universe::heap()->is_in_reserved(k), "should not be an oop");
|
|
2900 |
|
|
2901 |
InstructionMark im(this);
|
|
2902 |
RelocationHolder rspec = metadata_Relocation::spec(index);
|
|
2903 |
code_section()->relocate(inst_mark(), rspec);
|
|
2904 |
narrowKlass nk = Klass::encode_klass(k);
|
|
2905 |
movz(dst, (nk >> 16), 16);
|
|
2906 |
movk(dst, nk & 0xffff);
|
|
2907 |
}
|
|
2908 |
|
|
2909 |
void MacroAssembler::load_heap_oop(Register dst, Address src)
|
|
2910 |
{
|
|
2911 |
if (UseCompressedOops) {
|
|
2912 |
ldrw(dst, src);
|
|
2913 |
decode_heap_oop(dst);
|
|
2914 |
} else {
|
|
2915 |
ldr(dst, src);
|
|
2916 |
}
|
|
2917 |
}
|
|
2918 |
|
|
2919 |
void MacroAssembler::load_heap_oop_not_null(Register dst, Address src)
|
|
2920 |
{
|
|
2921 |
if (UseCompressedOops) {
|
|
2922 |
ldrw(dst, src);
|
|
2923 |
decode_heap_oop_not_null(dst);
|
|
2924 |
} else {
|
|
2925 |
ldr(dst, src);
|
|
2926 |
}
|
|
2927 |
}
|
|
2928 |
|
|
2929 |
void MacroAssembler::store_heap_oop(Address dst, Register src) {
  if (UseCompressedOops) {
    assert(!dst.uses(src), "not enough registers");
    encode_heap_oop(src);
    strw(src, dst);
  } else
    str(src, dst);
}

// Used for storing NULLs.
void MacroAssembler::store_heap_oop_null(Address dst) {
  if (UseCompressedOops) {
    strw(zr, dst);
  } else
    str(zr, dst);
}
|
|
2945 |
|
|
2946 |
#if INCLUDE_ALL_GCS
|
|
2947 |
void MacroAssembler::g1_write_barrier_pre(Register obj,
|
|
2948 |
Register pre_val,
|
|
2949 |
Register thread,
|
|
2950 |
Register tmp,
|
|
2951 |
bool tosca_live,
|
|
2952 |
bool expand_call) {
|
|
2953 |
// If expand_call is true then we expand the call_VM_leaf macro
|
|
2954 |
// directly to skip generating the check by
|
|
2955 |
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
|
|
2956 |
|
|
2957 |
assert(thread == rthread, "must be");
|
|
2958 |
|
|
2959 |
Label done;
|
|
2960 |
Label runtime;
|
|
2961 |
|
|
2962 |
assert(pre_val != noreg, "check this code");
|
|
2963 |
|
|
2964 |
if (obj != noreg)
|
|
2965 |
assert_different_registers(obj, pre_val, tmp);
|
|
2966 |
|
|
2967 |
Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
|
|
2968 |
PtrQueue::byte_offset_of_active()));
|
|
2969 |
Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
|
|
2970 |
PtrQueue::byte_offset_of_index()));
|
|
2971 |
Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
|
|
2972 |
PtrQueue::byte_offset_of_buf()));
|
|
2973 |
|
|
2974 |
|
|
2975 |
// Is marking active?
|
|
2976 |
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
|
|
2977 |
ldrw(tmp, in_progress);
|
|
2978 |
} else {
|
|
2979 |
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
|
|
2980 |
ldrb(tmp, in_progress);
|
|
2981 |
}
|
|
2982 |
cbzw(tmp, done);
|
|
2983 |
|
|
2984 |
// Do we need to load the previous value?
|
|
2985 |
if (obj != noreg) {
|
|
2986 |
load_heap_oop(pre_val, Address(obj, 0));
|
|
2987 |
}
|
|
2988 |
|
|
2989 |
// Is the previous value null?
|
|
2990 |
cbz(pre_val, done);
|
|
2991 |
|
|
2992 |
// Can we store original value in the thread's buffer?
|
|
2993 |
// Is index == 0?
|
|
2994 |
// (The index field is typed as size_t.)
|
|
2995 |
|
|
2996 |
ldr(tmp, index); // tmp := *index_adr
|
|
2997 |
cbz(tmp, runtime); // tmp == 0?
|
|
2998 |
// If yes, goto runtime
|
|
2999 |
|
|
3000 |
sub(tmp, tmp, wordSize); // tmp := tmp - wordSize
|
|
3001 |
str(tmp, index); // *index_adr := tmp
|
|
3002 |
ldr(rscratch1, buffer);
|
|
3003 |
add(tmp, tmp, rscratch1); // tmp := tmp + *buffer_adr
|
|
3004 |
|
|
3005 |
// Record the previous value
|
|
3006 |
str(pre_val, Address(tmp, 0));
|
|
3007 |
b(done);
|
|
3008 |
|
|
3009 |
bind(runtime);
|
|
3010 |
// save the live input values
|
|
3011 |
push(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);
|
|
3012 |
|
|
3013 |
// Calling the runtime using the regular call_VM_leaf mechanism generates
|
|
3014 |
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
|
|
3015 |
// that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
|
|
3016 |
//
|
|
3017 |
  // If we are generating the pre-barrier without a frame (e.g. in the
|
|
3018 |
// intrinsified Reference.get() routine) then ebp might be pointing to
|
|
3019 |
// the caller frame and so this check will most likely fail at runtime.
|
|
3020 |
//
|
|
3021 |
// Expanding the call directly bypasses the generation of the check.
|
|
3022 |
  // So when we do not have a full interpreter frame on the stack
|
|
3023 |
// expand_call should be passed true.
|
|
3024 |
|
|
3025 |
if (expand_call) {
|
|
3026 |
assert(pre_val != c_rarg1, "smashed arg");
|
|
3027 |
pass_arg1(this, thread);
|
|
3028 |
pass_arg0(this, pre_val);
|
|
3029 |
MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
|
|
3030 |
} else {
|
|
3031 |
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
|
|
3032 |
}
|
|
3033 |
|
|
3034 |
pop(r0->bit(tosca_live) | obj->bit(obj != noreg) | pre_val->bit(true), sp);
|
|
3035 |
|
|
3036 |
bind(done);
|
|
3037 |
}
|
|
3038 |
|
|
3039 |
void MacroAssembler::g1_write_barrier_post(Register store_addr,
|
|
3040 |
Register new_val,
|
|
3041 |
Register thread,
|
|
3042 |
Register tmp,
|
|
3043 |
Register tmp2) {
|
|
3044 |
assert(thread == rthread, "must be");
|
|
3045 |
|
|
3046 |
Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
|
|
3047 |
PtrQueue::byte_offset_of_index()));
|
|
3048 |
Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
|
|
3049 |
PtrQueue::byte_offset_of_buf()));
|
|
3050 |
|
|
3051 |
BarrierSet* bs = Universe::heap()->barrier_set();
|
|
3052 |
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
|
|
3053 |
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
|
|
3054 |
|
|
3055 |
Label done;
|
|
3056 |
Label runtime;
|
|
3057 |
|
|
3058 |
// Does store cross heap regions?
|
|
3059 |
|
|
3060 |
eor(tmp, store_addr, new_val);
|
|
3061 |
lsr(tmp, tmp, HeapRegion::LogOfHRGrainBytes);
|
|
3062 |
cbz(tmp, done);
|
|
3063 |
|
|
3064 |
// crosses regions, storing NULL?
|
|
3065 |
|
|
3066 |
cbz(new_val, done);
|
|
3067 |
|
|
3068 |
// storing region crossing non-NULL, is card already dirty?
|
|
3069 |
|
|
3070 |
ExternalAddress cardtable((address) ct->byte_map_base);
|
|
3071 |
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
|
|
3072 |
const Register card_addr = tmp;
|
|
3073 |
|
|
3074 |
lsr(card_addr, store_addr, CardTableModRefBS::card_shift);
|
|
3075 |
|
|
3076 |
unsigned long offset;
|
|
3077 |
adrp(tmp2, cardtable, offset);
|
|
3078 |
|
|
3079 |
// get the address of the card
|
|
3080 |
add(card_addr, card_addr, tmp2);
|
|
3081 |
ldrb(tmp2, Address(card_addr, offset));
|
|
3082 |
cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
|
|
3083 |
br(Assembler::EQ, done);
|
|
3084 |
|
|
3085 |
assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
|
|
3086 |
|
|
3087 |
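  // StoreLoad barrier so the card re-read below is ordered after the preceding
  // oop field store (G1 concurrent refinement protocol).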
membar(Assembler::StoreLoad);
|
|
3088 |
|
|
3089 |
ldrb(tmp2, Address(card_addr, offset));
|
|
3090 |
cbzw(tmp2, done);
|
|
3091 |
|
|
3092 |
// storing a region crossing, non-NULL oop, card is clean.
|
|
3093 |
// dirty card and log.
|
|
3094 |
|
|
3095 |
strb(zr, Address(card_addr, offset));
|
|
3096 |
|
|
3097 |
ldr(rscratch1, queue_index);
|
|
3098 |
cbz(rscratch1, runtime);
|
|
3099 |
sub(rscratch1, rscratch1, wordSize);
|
|
3100 |
str(rscratch1, queue_index);
|
|
3101 |
|
|
3102 |
ldr(tmp2, buffer);
|
|
3103 |
str(card_addr, Address(tmp2, rscratch1));
|
|
3104 |
b(done);
|
|
3105 |
|
|
3106 |
bind(runtime);
|
|
3107 |
// save the live input values
|
|
3108 |
push(store_addr->bit(true) | new_val->bit(true), sp);
|
|
3109 |
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
|
|
3110 |
pop(store_addr->bit(true) | new_val->bit(true), sp);
|
|
3111 |
|
|
3112 |
bind(done);
|
|
3113 |
}
|
|
3114 |
|
|
3115 |
#endif // INCLUDE_ALL_GCS
|
|
3116 |
|
|
3117 |
Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
|
|
3118 |
assert(oop_recorder() != NULL, "this assembler needs a Recorder");
|
|
3119 |
int index = oop_recorder()->allocate_metadata_index(obj);
|
|
3120 |
RelocationHolder rspec = metadata_Relocation::spec(index);
|
|
3121 |
return Address((address)obj, rspec);
|
|
3122 |
}
|
|
3123 |
|
|
3124 |
// Move an oop into a register.  immediate is true if we want
// immediate instructions, i.e. we are not going to patch this
// instruction while the code is being executed by another thread.  In
// that case we can use move immediates rather than the constant pool.
void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
  int oop_index;
  if (obj == NULL) {
    oop_index = oop_recorder()->allocate_oop_index(obj);
  } else {
    oop_index = oop_recorder()->find_index(obj);
    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "should be real oop");
  }
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  if (! immediate) {
    address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
    ldr_constant(dst, Address(dummy, rspec));
  } else
    mov(dst, Address((address)obj, rspec));
}
|
|
3143 |
|
|
3144 |
// Move a metadata address into a register.
|
|
3145 |
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
|
|
3146 |
int oop_index;
|
|
3147 |
if (obj == NULL) {
|
|
3148 |
oop_index = oop_recorder()->allocate_metadata_index(obj);
|
|
3149 |
} else {
|
|
3150 |
oop_index = oop_recorder()->find_index(obj);
|
|
3151 |
}
|
|
3152 |
RelocationHolder rspec = metadata_Relocation::spec(oop_index);
|
|
3153 |
mov(dst, Address((address)obj, rspec));
|
|
3154 |
}
|
|
3155 |
|
|
3156 |
Address MacroAssembler::constant_oop_address(jobject obj) {
|
|
3157 |
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
|
|
3158 |
assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
|
|
3159 |
int oop_index = oop_recorder()->find_index(obj);
|
|
3160 |
return Address((address)obj, oop_Relocation::spec(oop_index));
|
|
3161 |
}
|
|
3162 |
|
|
3163 |
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
|
|
3164 |
void MacroAssembler::tlab_allocate(Register obj,
|
|
3165 |
Register var_size_in_bytes,
|
|
3166 |
int con_size_in_bytes,
|
|
3167 |
Register t1,
|
|
3168 |
Register t2,
|
|
3169 |
Label& slow_case) {
|
|
3170 |
assert_different_registers(obj, t2);
|
|
3171 |
assert_different_registers(obj, var_size_in_bytes);
|
|
3172 |
Register end = t2;
|
|
3173 |
|
|
3174 |
// verify_tlab();
|
|
3175 |
|
|
3176 |
ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
|
|
3177 |
if (var_size_in_bytes == noreg) {
|
|
3178 |
lea(end, Address(obj, con_size_in_bytes));
|
|
3179 |
} else {
|
|
3180 |
lea(end, Address(obj, var_size_in_bytes));
|
|
3181 |
}
|
|
3182 |
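  // Take the slow path if the new top would run past the current TLAB end.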
ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
|
|
3183 |
cmp(end, rscratch1);
|
|
3184 |
br(Assembler::HI, slow_case);
|
|
3185 |
|
|
3186 |
// update the tlab top pointer
|
|
3187 |
str(end, Address(rthread, JavaThread::tlab_top_offset()));
|
|
3188 |
|
|
3189 |
// recover var_size_in_bytes if necessary
|
|
3190 |
if (var_size_in_bytes == end) {
|
|
3191 |
sub(var_size_in_bytes, var_size_in_bytes, obj);
|
|
3192 |
}
|
|
3193 |
// verify_tlab();
|
|
3194 |
}
|
|
3195 |
|
|
3196 |
// Preserves r19 and r3.
|
|
3197 |
Register MacroAssembler::tlab_refill(Label& retry,
|
|
3198 |
Label& try_eden,
|
|
3199 |
Label& slow_case) {
|
|
3200 |
Register top = r0;
|
|
3201 |
Register t1 = r2;
|
|
3202 |
Register t2 = r4;
|
|
3203 |
assert_different_registers(top, rthread, t1, t2, /* preserve: */ r19, r3);
|
|
3204 |
Label do_refill, discard_tlab;
|
|
3205 |
|
|
3206 |
if (!Universe::heap()->supports_inline_contig_alloc()) {
|
|
3207 |
// No allocation in the shared eden.
|
|
3208 |
b(slow_case);
|
|
3209 |
}
|
|
3210 |
|
|
3211 |
ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
|
|
3212 |
ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
|
|
3213 |
|
|
3214 |
// calculate amount of free space
|
|
3215 |
sub(t1, t1, top);
|
|
3216 |
lsr(t1, t1, LogHeapWordSize);
|
|
3217 |
|
|
3218 |
// Retain tlab and allocate object in shared space if
|
|
3219 |
// the amount free in the tlab is too large to discard.
|
|
3220 |
|
|
3221 |
ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
|
|
3222 |
cmp(t1, rscratch1);
|
|
3223 |
br(Assembler::LE, discard_tlab);
|
|
3224 |
|
|
3225 |
// Retain
|
|
3226 |
// ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
|
|
3227 |
mov(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
|
|
3228 |
add(rscratch1, rscratch1, t2);
|
|
3229 |
str(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
|
|
3230 |
|
|
3231 |
if (TLABStats) {
|
|
3232 |
// increment number of slow_allocations
|
|
3233 |
addmw(Address(rthread, in_bytes(JavaThread::tlab_slow_allocations_offset())),
|
|
3234 |
1, rscratch1);
|
|
3235 |
}
|
|
3236 |
b(try_eden);
|
|
3237 |
|
|
3238 |
bind(discard_tlab);
|
|
3239 |
if (TLABStats) {
|
|
3240 |
// increment number of refills
|
|
3241 |
addmw(Address(rthread, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1,
|
|
3242 |
rscratch1);
|
|
3243 |
// accumulate wastage -- t1 is amount free in tlab
|
|
3244 |
addmw(Address(rthread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1,
|
|
3245 |
rscratch1);
|
|
3246 |
}
|
|
3247 |
|
|
3248 |
// if tlab is currently allocated (top or end != null) then
|
|
3249 |
// fill [top, end + alignment_reserve) with array object
|
|
3250 |
cbz(top, do_refill);
|
|
3251 |
|
|
3252 |
// set up the mark word
|
|
3253 |
mov(rscratch1, (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
|
|
3254 |
str(rscratch1, Address(top, oopDesc::mark_offset_in_bytes()));
|
|
3255 |
// set the length to the remaining space
|
|
3256 |
sub(t1, t1, typeArrayOopDesc::header_size(T_INT));
|
|
3257 |
add(t1, t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
|
|
3258 |
lsl(t1, t1, log2_intptr(HeapWordSize/sizeof(jint)));
|
|
3259 |
strw(t1, Address(top, arrayOopDesc::length_offset_in_bytes()));
|
|
3260 |
// set klass to intArrayKlass
|
|
3261 |
{
|
|
3262 |
unsigned long offset;
|
|
3263 |
    // dubious reloc: why not an oop reloc?
|
|
3264 |
adrp(rscratch1, ExternalAddress((address)Universe::intArrayKlassObj_addr()),
|
|
3265 |
offset);
|
|
3266 |
ldr(t1, Address(rscratch1, offset));
|
|
3267 |
}
|
|
3268 |
  // Store klass last: concurrent GCs assume the length is valid if the
  // klass field is not null.
|
|
3270 |
store_klass(top, t1);
|
|
3271 |
|
|
3272 |
mov(t1, top);
|
|
3273 |
ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
|
|
3274 |
sub(t1, t1, rscratch1);
|
|
3275 |
incr_allocated_bytes(rthread, t1, 0, rscratch1);
|
|
3276 |
|
|
3277 |
// refill the tlab with an eden allocation
|
|
3278 |
bind(do_refill);
|
|
3279 |
ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
|
|
3280 |
lsl(t1, t1, LogHeapWordSize);
|
|
3281 |
// allocate new tlab, address returned in top
|
|
3282 |
eden_allocate(top, t1, 0, t2, slow_case);
|
|
3283 |
|
|
3284 |
// Check that t1 was preserved in eden_allocate.
|
|
3285 |
#ifdef ASSERT
|
|
3286 |
if (UseTLAB) {
|
|
3287 |
Label ok;
|
|
3288 |
Register tsize = r4;
|
|
3289 |
assert_different_registers(tsize, rthread, t1);
|
|
3290 |
str(tsize, Address(pre(sp, -16)));
|
|
3291 |
ldr(tsize, Address(rthread, in_bytes(JavaThread::tlab_size_offset())));
|
|
3292 |
lsl(tsize, tsize, LogHeapWordSize);
|
|
3293 |
cmp(t1, tsize);
|
|
3294 |
br(Assembler::EQ, ok);
|
|
3295 |
STOP("assert(t1 != tlab size)");
|
|
3296 |
should_not_reach_here();
|
|
3297 |
|
|
3298 |
bind(ok);
|
|
3299 |
ldr(tsize, Address(post(sp, 16)));
|
|
3300 |
}
|
|
3301 |
#endif
|
|
3302 |
str(top, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
|
|
3303 |
str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
|
|
3304 |
add(top, top, t1);
|
|
3305 |
sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
|
|
3306 |
str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
|
|
3307 |
verify_tlab();
|
|
3308 |
b(retry);
|
|
3309 |
|
|
3310 |
return rthread; // for use by caller
|
|
3311 |
}
|
|
3312 |
|
|
3313 |
// Defines obj, preserves var_size_in_bytes
|
|
3314 |
void MacroAssembler::eden_allocate(Register obj,
|
|
3315 |
Register var_size_in_bytes,
|
|
3316 |
int con_size_in_bytes,
|
|
3317 |
Register t1,
|
|
3318 |
Label& slow_case) {
|
|
3319 |
assert_different_registers(obj, var_size_in_bytes, t1);
|
|
3320 |
if (!Universe::heap()->supports_inline_contig_alloc()) {
|
|
3321 |
b(slow_case);
|
|
3322 |
} else {
|
|
3323 |
Register end = t1;
|
|
3324 |
Register heap_end = rscratch2;
|
|
3325 |
Label retry;
|
|
3326 |
bind(retry);
|
|
3327 |
{
|
|
3328 |
unsigned long offset;
|
|
3329 |
adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
|
|
3330 |
ldr(heap_end, Address(rscratch1, offset));
|
|
3331 |
}
|
|
3332 |
|
|
3333 |
ExternalAddress heap_top((address) Universe::heap()->top_addr());
|
|
3334 |
|
|
3335 |
// Get the current top of the heap
|
|
3336 |
{
|
|
3337 |
unsigned long offset;
|
|
3338 |
adrp(rscratch1, heap_top, offset);
|
|
3339 |
      // Use add() here after ADRP, rather than lea().
|
|
3340 |
// lea() does not generate anything if its offset is zero.
|
|
3341 |
// However, relocs expect to find either an ADD or a load/store
|
|
3342 |
// insn after an ADRP. add() always generates an ADD insn, even
|
|
3343 |
// for add(Rn, Rn, 0).
|
|
3344 |
add(rscratch1, rscratch1, offset);
|
|
3345 |
ldaxr(obj, rscratch1);
|
|
3346 |
}
|
|
3347 |
|
|
3348 |
    // Adjust it by the size of our new object
|
|
3349 |
if (var_size_in_bytes == noreg) {
|
|
3350 |
lea(end, Address(obj, con_size_in_bytes));
|
|
3351 |
} else {
|
|
3352 |
lea(end, Address(obj, var_size_in_bytes));
|
|
3353 |
}
|
|
3354 |
|
|
3355 |
// if end < obj then we wrapped around high memory
|
|
3356 |
cmp(end, obj);
|
|
3357 |
br(Assembler::LO, slow_case);
|
|
3358 |
|
|
3359 |
cmp(end, heap_end);
|
|
3360 |
br(Assembler::HI, slow_case);
|
|
3361 |
|
|
3362 |
// If heap_top hasn't been changed by some other thread, update it.
|
|
3363 |
stlxr(rscratch1, end, rscratch1);
|
|
3364 |
cbnzw(rscratch1, retry);
|
|
3365 |
}
|
|
3366 |
}
|
|
3367 |
|
|
3368 |
void MacroAssembler::verify_tlab() {
|
|
3369 |
#ifdef ASSERT
|
|
3370 |
if (UseTLAB && VerifyOops) {
|
|
3371 |
Label next, ok;
|
|
3372 |
|
|
3373 |
stp(rscratch2, rscratch1, Address(pre(sp, -16)));
|
|
3374 |
|
|
3375 |
ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
|
|
3376 |
ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset())));
|
|
3377 |
cmp(rscratch2, rscratch1);
|
|
3378 |
br(Assembler::HS, next);
|
|
3379 |
STOP("assert(top >= start)");
|
|
3380 |
should_not_reach_here();
|
|
3381 |
|
|
3382 |
bind(next);
|
|
3383 |
ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset())));
|
|
3384 |
ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset())));
|
|
3385 |
cmp(rscratch2, rscratch1);
|
|
3386 |
br(Assembler::HS, ok);
|
|
3387 |
STOP("assert(top <= end)");
|
|
3388 |
should_not_reach_here();
|
|
3389 |
|
|
3390 |
bind(ok);
|
|
3391 |
ldp(rscratch2, rscratch1, Address(post(sp, 16)));
|
|
3392 |
}
|
|
3393 |
#endif
|
|
3394 |
}
|
|
3395 |
|
|
3396 |
// Writes to successive stack pages until the given offset is reached, to check
// for stack overflow + shadow pages.  This clobbers tmp.
|
|
3398 |
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
|
|
3399 |
assert_different_registers(tmp, size, rscratch1);
|
|
3400 |
mov(tmp, sp);
|
|
3401 |
// Bang stack for total size given plus shadow page size.
|
|
3402 |
// Bang one page at a time because large size can bang beyond yellow and
|
|
3403 |
// red zones.
|
|
3404 |
Label loop;
|
|
3405 |
mov(rscratch1, os::vm_page_size());
|
|
3406 |
bind(loop);
|
|
3407 |
lea(tmp, Address(tmp, -os::vm_page_size()));
|
|
3408 |
subsw(size, size, rscratch1);
|
|
3409 |
str(size, Address(tmp));
|
|
3410 |
br(Assembler::GT, loop);
|
|
3411 |
|
|
3412 |
// Bang down shadow pages too.
|
|
3413 |
// At this point, (tmp-0) is the last address touched, so don't
|
|
3414 |
// touch it again. (It was touched as (tmp-pagesize) but then tmp
|
|
3415 |
// was post-decremented.) Skip this address by starting at i=1, and
|
|
3416 |
// touch a few more pages below. N.B. It is important to touch all
|
|
3417 |
// the way down to and including i=StackShadowPages.
|
|
3418 |
for (int i = 0; i< StackShadowPages-1; i++) {
|
|
3419 |
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
|
|
3421 |
lea(tmp, Address(tmp, -os::vm_page_size()));
|
|
3422 |
str(size, Address(tmp));
|
|
3423 |
}
|
|
3424 |
}
|
|
3425 |
|
|
3426 |
|
|
3427 |
address MacroAssembler::read_polling_page(Register r, address page, relocInfo::relocType rtype) {
|
|
3428 |
unsigned long off;
|
|
3429 |
adrp(r, Address(page, rtype), off);
|
|
3430 |
InstructionMark im(this);
|
|
3431 |
code_section()->relocate(inst_mark(), rtype);
|
|
3432 |
ldrw(zr, Address(r, off));
|
|
3433 |
return inst_mark();
|
|
3434 |
}
|
|
3435 |
|
|
3436 |
address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) {
|
|
3437 |
InstructionMark im(this);
|
|
3438 |
code_section()->relocate(inst_mark(), rtype);
|
|
3439 |
ldrw(zr, Address(r, 0));
|
|
3440 |
return inst_mark();
|
|
3441 |
}
|
|
3442 |
|
|
3443 |
void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
|
|
3444 |
relocInfo::relocType rtype = dest.rspec().reloc()->type();
|
|
3445 |
if (uabs(pc() - dest.target()) >= (1LL << 32)) {
|
|
3446 |
guarantee(rtype == relocInfo::none
|
|
3447 |
|| rtype == relocInfo::external_word_type
|
|
3448 |
|| rtype == relocInfo::poll_type
|
|
3449 |
|| rtype == relocInfo::poll_return_type,
|
|
3450 |
"can only use a fixed address with an ADRP");
|
|
3451 |
// Out of range. This doesn't happen very often, but we have to
|
|
3452 |
    // handle it.
|
|
3453 |
mov(reg1, dest);
|
|
3454 |
byte_offset = 0;
|
|
3455 |
} else {
|
|
3456 |
InstructionMark im(this);
|
|
3457 |
code_section()->relocate(inst_mark(), dest.rspec());
|
|
3458 |
byte_offset = (uint64_t)dest.target() & 0xfff;
|
|
3459 |
_adrp(reg1, dest.target());
|
|
3460 |
}
|
|
3461 |
}
|
|
3462 |
|
|
3463 |
bool MacroAssembler::use_acq_rel_for_volatile_fields() {
#ifdef PRODUCT
  return false;
#else
  return UseAcqRelForVolatileFields;
#endif
}
|
|
3470 |
|
|
3471 |
void MacroAssembler::build_frame(int framesize) {
|
|
3472 |
if (framesize == 0) {
|
|
3473 |
// Is this even possible?
|
|
3474 |
stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
|
|
3475 |
} else if (framesize < ((1 << 9) + 2 * wordSize)) {
|
|
3476 |
sub(sp, sp, framesize);
|
|
3477 |
stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
|
|
3478 |
} else {
|
|
3479 |
stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
|
|
3480 |
if (framesize < ((1 << 12) + 2 * wordSize))
|
|
3481 |
sub(sp, sp, framesize - 2 * wordSize);
|
|
3482 |
else {
|
|
3483 |
mov(rscratch1, framesize - 2 * wordSize);
|
|
3484 |
sub(sp, sp, rscratch1);
|
|
3485 |
}
|
|
3486 |
}
|
|
3487 |
}
|
|
3488 |
|
|
3489 |
void MacroAssembler::remove_frame(int framesize) {
|
|
3490 |
if (framesize == 0) {
|
|
3491 |
ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
|
|
3492 |
} else if (framesize < ((1 << 9) + 2 * wordSize)) {
|
|
3493 |
ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
|
|
3494 |
add(sp, sp, framesize);
|
|
3495 |
} else {
|
|
3496 |
if (framesize < ((1 << 12) + 2 * wordSize))
|
|
3497 |
add(sp, sp, framesize - 2 * wordSize);
|
|
3498 |
else {
|
|
3499 |
mov(rscratch1, framesize - 2 * wordSize);
|
|
3500 |
add(sp, sp, rscratch1);
|
|
3501 |
}
|
|
3502 |
ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
|
|
3503 |
}
|
|
3504 |
}
|
|
3505 |
|
|
3506 |
|
|
3507 |
// Search for str1 in str2 and return index or -1
|
|
3508 |
void MacroAssembler::string_indexof(Register str2, Register str1,
|
|
3509 |
Register cnt2, Register cnt1,
|
|
3510 |
Register tmp1, Register tmp2,
|
|
3511 |
Register tmp3, Register tmp4,
|
|
3512 |
int icnt1, Register result) {
|
|
3513 |
Label BM, LINEARSEARCH, DONE, NOMATCH, MATCH;
|
|
3514 |
|
|
3515 |
Register ch1 = rscratch1;
|
|
3516 |
Register ch2 = rscratch2;
|
|
3517 |
Register cnt1tmp = tmp1;
|
|
3518 |
Register cnt2tmp = tmp2;
|
|
3519 |
Register cnt1_neg = cnt1;
|
|
3520 |
Register cnt2_neg = cnt2;
|
|
3521 |
Register result_tmp = tmp4;
|
|
3522 |
|
|
3523 |
// Note, inline_string_indexOf() generates checks:
|
|
3524 |
// if (substr.count > string.count) return -1;
|
|
3525 |
// if (substr.count == 0) return 0;
|
|
3526 |
|
|
3527 |
// We have two strings, a source string in str2, cnt2 and a pattern string
|
|
3528 |
  // in str1, cnt1. Find the 1st occurrence of pattern in source or return -1.
|
|
3529 |
|
|
3530 |
// For larger pattern and source we use a simplified Boyer Moore algorithm.
|
|
3531 |
// With a small pattern and source we use linear scan.
|
|
3532 |
|
|
3533 |
if (icnt1 == -1) {
|
|
3534 |
cmp(cnt1, 256); // Use Linear Scan if cnt1 < 8 || cnt1 >= 256
|
|
3535 |
ccmp(cnt1, 8, 0b0000, LO); // Can't handle skip >= 256 because we use
|
|
3536 |
br(LO, LINEARSEARCH); // a byte array.
|
|
3537 |
cmp(cnt1, cnt2, LSR, 2); // Source must be 4 * pattern for BM
|
|
3538 |
br(HS, LINEARSEARCH);
|
|
3539 |
}
|
|
3540 |
|
|
3541 |
  // The Boyer-Moore algorithm is based on the description here:-
|
|
3542 |
//
|
|
3543 |
// http://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_string_search_algorithm
|
|
3544 |
//
|
|
3545 |
  // This describes an algorithm with 2 shift rules. The 'Bad Character' rule
|
|
3546 |
// and the 'Good Suffix' rule.
|
|
3547 |
//
|
|
3548 |
// These rules are essentially heuristics for how far we can shift the
|
|
3549 |
// pattern along the search string.
|
|
3550 |
//
|
|
3551 |
// The implementation here uses the 'Bad Character' rule only because of the
|
|
3552 |
// complexity of initialisation for the 'Good Suffix' rule.
|
|
3553 |
//
|
|
3554 |
// This is also known as the Boyer-Moore-Horspool algorithm:-
|
|
3555 |
//
|
|
3556 |
// http://en.wikipedia.org/wiki/Boyer-Moore-Horspool_algorithm
|
|
3557 |
//
|
|
3558 |
// #define ASIZE 128
|
|
3559 |
//
|
|
3560 |
// int bm(unsigned char *x, int m, unsigned char *y, int n) {
|
|
3561 |
// int i, j;
|
|
3562 |
// unsigned c;
|
|
3563 |
// unsigned char bc[ASIZE];
|
|
3564 |
//
|
|
3565 |
// /* Preprocessing */
|
|
3566 |
// for (i = 0; i < ASIZE; ++i)
|
|
3567 |
// bc[i] = 0;
|
|
3568 |
// for (i = 0; i < m - 1; ) {
|
|
3569 |
// c = x[i];
|
|
3570 |
// ++i;
|
|
3571 |
// if (c < ASIZE) bc[c] = i;
|
|
3572 |
// }
|
|
3573 |
//
|
|
3574 |
// /* Searching */
|
|
3575 |
// j = 0;
|
|
3576 |
// while (j <= n - m) {
|
|
3577 |
// c = y[i+j];
|
|
3578 |
// if (x[m-1] == c)
|
|
3579 |
// for (i = m - 2; i >= 0 && x[i] == y[i + j]; --i);
|
|
3580 |
// if (i < 0) return j;
|
|
3581 |
// if (c < ASIZE)
|
|
3582 |
// j = j - bc[y[j+m-1]] + m;
|
|
3583 |
// else
|
|
3584 |
// j += 1; // Advance by 1 only if char >= ASIZE
|
|
3585 |
// }
|
|
3586 |
// }
|
|
3587 |
|
|
3588 |
if (icnt1 == -1) {
|
|
3589 |
BIND(BM);
|
|
3590 |
|
|
3591 |
Label ZLOOP, BCLOOP, BCSKIP, BMLOOPSTR2, BMLOOPSTR1, BMSKIP;
|
|
3592 |
Label BMADV, BMMATCH, BMCHECKEND;
|
|
3593 |
|
|
3594 |
Register cnt1end = tmp2;
|
|
3595 |
Register str2end = cnt2;
|
|
3596 |
Register skipch = tmp2;
|
|
3597 |
|
|
3598 |
// Restrict ASIZE to 128 to reduce stack space/initialisation.
|
|
3599 |
// The presence of chars >= ASIZE in the target string does not affect
|
|
3600 |
// performance, but we must be careful not to initialise them in the stack
|
|
3601 |
// array.
|
|
3602 |
// The presence of chars >= ASIZE in the source string may adversely affect
|
|
3603 |
// performance since we can only advance by one when we encounter one.
|
|
3604 |
|
|
3605 |
stp(zr, zr, pre(sp, -128));
|
|
3606 |
for (int i = 1; i < 8; i++)
|
|
3607 |
stp(zr, zr, Address(sp, i*16));
|
|
3608 |
|
|
3609 |
mov(cnt1tmp, 0);
|
|
3610 |
sub(cnt1end, cnt1, 1);
|
|
3611 |
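    // Preprocessing: build the bad-character table in the 128-byte stack array.
    // Each pattern char < ASIZE maps to the index just past its last occurrence.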
BIND(BCLOOP);
|
|
3612 |
ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
|
|
3613 |
cmp(ch1, 128);
|
|
3614 |
add(cnt1tmp, cnt1tmp, 1);
|
|
3615 |
br(HS, BCSKIP);
|
|
3616 |
strb(cnt1tmp, Address(sp, ch1));
|
|
3617 |
BIND(BCSKIP);
|
|
3618 |
cmp(cnt1tmp, cnt1end);
|
|
3619 |
br(LT, BCLOOP);
|
|
3620 |
|
|
3621 |
mov(result_tmp, str2);
|
|
3622 |
|
|
3623 |
sub(cnt2, cnt2, cnt1);
|
|
3624 |
add(str2end, str2, cnt2, LSL, 1);
|
|
3625 |
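    // Main search loop: compare the pattern against the current window
    // backwards, starting from its last character.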
BIND(BMLOOPSTR2);
|
|
3626 |
sub(cnt1tmp, cnt1, 1);
|
|
3627 |
ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
|
|
3628 |
ldrh(skipch, Address(str2, cnt1tmp, Address::lsl(1)));
|
|
3629 |
cmp(ch1, skipch);
|
|
3630 |
br(NE, BMSKIP);
|
|
3631 |
subs(cnt1tmp, cnt1tmp, 1);
|
|
3632 |
br(LT, BMMATCH);
|
|
3633 |
BIND(BMLOOPSTR1);
|
|
3634 |
ldrh(ch1, Address(str1, cnt1tmp, Address::lsl(1)));
|
|
3635 |
ldrh(ch2, Address(str2, cnt1tmp, Address::lsl(1)));
|
|
3636 |
cmp(ch1, ch2);
|
|
3637 |
br(NE, BMSKIP);
|
|
3638 |
subs(cnt1tmp, cnt1tmp, 1);
|
|
3639 |
br(GE, BMLOOPSTR1);
|
|
3640 |
BIND(BMMATCH);
|
|
3641 |
sub(result_tmp, str2, result_tmp);
|
|
3642 |
lsr(result, result_tmp, 1);
|
|
3643 |
add(sp, sp, 128);
|
|
3644 |
b(DONE);
|
|
3645 |
BIND(BMADV);
|
|
3646 |
add(str2, str2, 2);
|
|
3647 |
b(BMCHECKEND);
|
|
3648 |
BIND(BMSKIP);
|
|
3649 |
cmp(skipch, 128);
|
|
3650 |
br(HS, BMADV);
|
|
3651 |
ldrb(ch2, Address(sp, skipch));
|
|
3652 |
add(str2, str2, cnt1, LSL, 1);
|
|
3653 |
sub(str2, str2, ch2, LSL, 1);
|
|
3654 |
BIND(BMCHECKEND);
|
|
3655 |
cmp(str2, str2end);
|
|
3656 |
br(LE, BMLOOPSTR2);
|
|
3657 |
add(sp, sp, 128);
|
|
3658 |
b(NOMATCH);
|
|
3659 |
}
|
|
3660 |
|
|
3661 |
BIND(LINEARSEARCH);
|
|
3662 |
{
|
|
3663 |
Label DO1, DO2, DO3;
|
|
3664 |
|
|
3665 |
Register str2tmp = tmp2;
|
|
3666 |
Register first = tmp3;
|
|
3667 |
|
|
3668 |
if (icnt1 == -1)
|
|
3669 |
{
|
|
3670 |
Label DOSHORT, FIRST_LOOP, STR2_NEXT, STR1_LOOP, STR1_NEXT, LAST_WORD;
|
|
3671 |
|
|
3672 |
cmp(cnt1, 4);
|
|
3673 |
br(LT, DOSHORT);
|
|
3674 |
|
|
3675 |
sub(cnt2, cnt2, cnt1);
|
|
3676 |
sub(cnt1, cnt1, 4);
|
|
3677 |
mov(result_tmp, cnt2);
|
|
3678 |
|
|
3679 |
lea(str1, Address(str1, cnt1, Address::uxtw(1)));
|
|
3680 |
lea(str2, Address(str2, cnt2, Address::uxtw(1)));
|
|
3681 |
sub(cnt1_neg, zr, cnt1, LSL, 1);
|
|
3682 |
sub(cnt2_neg, zr, cnt2, LSL, 1);
|
|
3683 |
ldr(first, Address(str1, cnt1_neg));
|
|
3684 |
|
|
3685 |
BIND(FIRST_LOOP);
|
|
3686 |
ldr(ch2, Address(str2, cnt2_neg));
|
|
3687 |
cmp(first, ch2);
|
|
3688 |
br(EQ, STR1_LOOP);
|
|
3689 |
BIND(STR2_NEXT);
|
|
3690 |
adds(cnt2_neg, cnt2_neg, 2);
|
|
3691 |
br(LE, FIRST_LOOP);
|
|
3692 |
b(NOMATCH);
|
|
3693 |
|
|
3694 |
BIND(STR1_LOOP);
|
|
3695 |
adds(cnt1tmp, cnt1_neg, 8);
|
|
3696 |
add(cnt2tmp, cnt2_neg, 8);
|
|
3697 |
br(GE, LAST_WORD);
|
|
3698 |
|
|
3699 |
BIND(STR1_NEXT);
|
|
3700 |
ldr(ch1, Address(str1, cnt1tmp));
|
|
3701 |
ldr(ch2, Address(str2, cnt2tmp));
|
|
3702 |
cmp(ch1, ch2);
|
|
3703 |
br(NE, STR2_NEXT);
|
|
3704 |
adds(cnt1tmp, cnt1tmp, 8);
|
|
3705 |
add(cnt2tmp, cnt2tmp, 8);
|
|
3706 |
br(LT, STR1_NEXT);
|
|
3707 |
|
|
3708 |
BIND(LAST_WORD);
|
|
3709 |
ldr(ch1, Address(str1));
|
|
3710 |
sub(str2tmp, str2, cnt1_neg); // adjust to corresponding
|
|
3711 |
ldr(ch2, Address(str2tmp, cnt2_neg)); // word in str2
|
|
3712 |
cmp(ch1, ch2);
|
|
3713 |
br(NE, STR2_NEXT);
|
|
3714 |
b(MATCH);
|
|
3715 |
|
|
3716 |
BIND(DOSHORT);
|
|
3717 |
cmp(cnt1, 2);
|
|
3718 |
br(LT, DO1);
|
|
3719 |
br(GT, DO3);
|
|
3720 |
}
|
|
3721 |
|
|
3722 |
if (icnt1 == 4) {
|
|
3723 |
Label CH1_LOOP;
|
|
3724 |
|
|
3725 |
ldr(ch1, str1);
|
|
3726 |
sub(cnt2, cnt2, 4);
|
|
3727 |
mov(result_tmp, cnt2);
|
|
3728 |
lea(str2, Address(str2, cnt2, Address::uxtw(1)));
|
|
3729 |
sub(cnt2_neg, zr, cnt2, LSL, 1);
|
|
3730 |
|
|
3731 |
BIND(CH1_LOOP);
|
|
3732 |
ldr(ch2, Address(str2, cnt2_neg));
|
|
3733 |
cmp(ch1, ch2);
|
|
3734 |
br(EQ, MATCH);
|
|
3735 |
adds(cnt2_neg, cnt2_neg, 2);
|
|
3736 |
br(LE, CH1_LOOP);
|
|
3737 |
b(NOMATCH);
|
|
3738 |
}
|
|
3739 |
|
|
3740 |
if (icnt1 == -1 || icnt1 == 2) {
|
|
3741 |
Label CH1_LOOP;
|
|
3742 |
|
|
3743 |
BIND(DO2);
|
|
3744 |
ldrw(ch1, str1);
|
|
3745 |
sub(cnt2, cnt2, 2);
|
|
3746 |
mov(result_tmp, cnt2);
|
|
3747 |
lea(str2, Address(str2, cnt2, Address::uxtw(1)));
|
|
3748 |
sub(cnt2_neg, zr, cnt2, LSL, 1);
|
|
3749 |
|
|
3750 |
BIND(CH1_LOOP);
|
|
3751 |
ldrw(ch2, Address(str2, cnt2_neg));
|
|
3752 |
cmp(ch1, ch2);
|
|
3753 |
br(EQ, MATCH);
|
|
3754 |
adds(cnt2_neg, cnt2_neg, 2);
|
|
3755 |
br(LE, CH1_LOOP);
|
|
3756 |
b(NOMATCH);
|
|
3757 |
}
|
|
3758 |
|
|
3759 |
if (icnt1 == -1 || icnt1 == 3) {
|
|
3760 |
Label FIRST_LOOP, STR2_NEXT, STR1_LOOP;
|
|
3761 |
|
|
3762 |
BIND(DO3);
|
|
3763 |
ldrw(first, str1);
|
|
3764 |
ldrh(ch1, Address(str1, 4));
|
|
3765 |
|
|
3766 |
sub(cnt2, cnt2, 3);
|
|
3767 |
mov(result_tmp, cnt2);
|
|
3768 |
lea(str2, Address(str2, cnt2, Address::uxtw(1)));
|
|
3769 |
sub(cnt2_neg, zr, cnt2, LSL, 1);
|
|
3770 |
|
|
3771 |
BIND(FIRST_LOOP);
|
|
3772 |
ldrw(ch2, Address(str2, cnt2_neg));
|
|
3773 |
cmpw(first, ch2);
|
|
3774 |
br(EQ, STR1_LOOP);
|
|
3775 |
BIND(STR2_NEXT);
|
|
3776 |
adds(cnt2_neg, cnt2_neg, 2);
|
|
3777 |
br(LE, FIRST_LOOP);
|
|
3778 |
b(NOMATCH);
|
|
3779 |
|
|
3780 |
BIND(STR1_LOOP);
|
|
3781 |
add(cnt2tmp, cnt2_neg, 4);
|
|
3782 |
ldrh(ch2, Address(str2, cnt2tmp));
|
|
3783 |
cmp(ch1, ch2);
|
|
3784 |
br(NE, STR2_NEXT);
|
|
3785 |
b(MATCH);
|
|
3786 |
}
|
|
3787 |
|
|
3788 |
if (icnt1 == -1 || icnt1 == 1) {
|
|
3789 |
Label CH1_LOOP, HAS_ZERO;
|
|
3790 |
Label DO1_SHORT, DO1_LOOP;
|
|
3791 |
|
|
3792 |
BIND(DO1);
|
|
3793 |
ldrh(ch1, str1);
|
|
3794 |
cmp(cnt2, 4);
|
|
3795 |
br(LT, DO1_SHORT);
|
|
3796 |
|
|
3797 |
orr(ch1, ch1, ch1, LSL, 16);
|
|
3798 |
orr(ch1, ch1, ch1, LSL, 32);
|
|
3799 |
|
|
3800 |
sub(cnt2, cnt2, 4);
|
|
3801 |
mov(result_tmp, cnt2);
|
|
3802 |
lea(str2, Address(str2, cnt2, Address::uxtw(1)));
|
|
3803 |
sub(cnt2_neg, zr, cnt2, LSL, 1);
|
|
3804 |
|
|
3805 |
mov(tmp3, 0x0001000100010001);
|
|
3806 |
BIND(CH1_LOOP);
|
|
3807 |
ldr(ch2, Address(str2, cnt2_neg));
|
|
3808 |
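    // SWAR match test: XOR with the replicated pattern char, then detect any
    // zero 16-bit lane via the (x - 0x0001...) & ~(x | 0x7fff...) trick.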
eor(ch2, ch1, ch2);
|
|
3809 |
sub(tmp1, ch2, tmp3);
|
|
3810 |
orr(tmp2, ch2, 0x7fff7fff7fff7fff);
|
|
3811 |
bics(tmp1, tmp1, tmp2);
|
|
3812 |
br(NE, HAS_ZERO);
|
|
3813 |
adds(cnt2_neg, cnt2_neg, 8);
|
|
3814 |
br(LT, CH1_LOOP);
|
|
3815 |
|
|
3816 |
cmp(cnt2_neg, 8);
|
|
3817 |
mov(cnt2_neg, 0);
|
|
3818 |
br(LT, CH1_LOOP);
|
|
3819 |
b(NOMATCH);
|
|
3820 |
|
|
3821 |
BIND(HAS_ZERO);
|
|
3822 |
rev(tmp1, tmp1);
|
|
3823 |
clz(tmp1, tmp1);
|
|
3824 |
add(cnt2_neg, cnt2_neg, tmp1, LSR, 3);
|
|
3825 |
b(MATCH);
|
|
3826 |
|
|
3827 |
BIND(DO1_SHORT);
|
|
3828 |
mov(result_tmp, cnt2);
|
|
3829 |
lea(str2, Address(str2, cnt2, Address::uxtw(1)));
|
|
3830 |
sub(cnt2_neg, zr, cnt2, LSL, 1);
|
|
3831 |
BIND(DO1_LOOP);
|
|
3832 |
ldrh(ch2, Address(str2, cnt2_neg));
|
|
3833 |
cmpw(ch1, ch2);
|
|
3834 |
br(EQ, MATCH);
|
|
3835 |
adds(cnt2_neg, cnt2_neg, 2);
|
|
3836 |
br(LT, DO1_LOOP);
|
|
3837 |
}
|
|
3838 |
}
|
|
3839 |
BIND(NOMATCH);
|
|
3840 |
mov(result, -1);
|
|
3841 |
b(DONE);
|
|
3842 |
BIND(MATCH);
|
|
3843 |
add(result, result_tmp, cnt2_neg, ASR, 1);
|
|
3844 |
BIND(DONE);
|
|
3845 |
}
|
|
3846 |
|
|
3847 |
// Compare strings.
|
|
3848 |
void MacroAssembler::string_compare(Register str1, Register str2,
|
|
3849 |
Register cnt1, Register cnt2, Register result,
|
|
3850 |
Register tmp1) {
|
|
3851 |
Label LENGTH_DIFF, DONE, SHORT_LOOP, SHORT_STRING,
|
|
3852 |
NEXT_WORD, DIFFERENCE;
|
|
3853 |
|
|
3854 |
BLOCK_COMMENT("string_compare {");
|
|
3855 |
|
|
3856 |
// Compute the minimum of the string lengths and save the difference.
|
|
3857 |
subsw(tmp1, cnt1, cnt2);
|
|
3858 |
cselw(cnt2, cnt1, cnt2, Assembler::LE); // min
|
|
3859 |
|
|
3860 |
// A very short string
|
|
3861 |
cmpw(cnt2, 4);
|
|
3862 |
br(Assembler::LT, SHORT_STRING);
|
|
3863 |
|
|
3864 |
// Check if the strings start at the same location.
|
|
3865 |
cmp(str1, str2);
|
|
3866 |
br(Assembler::EQ, LENGTH_DIFF);
|
|
3867 |
|
|
3868 |
// Compare longwords
|
|
3869 |
{
|
|
3870 |
subw(cnt2, cnt2, 4); // The last longword is a special case
|
|
3871 |
|
|
3872 |
// Move both string pointers to the last longword of their
|
|
3873 |
// strings, negate the remaining count, and convert it to bytes.
|
|
3874 |
lea(str1, Address(str1, cnt2, Address::uxtw(1)));
|
|
3875 |
lea(str2, Address(str2, cnt2, Address::uxtw(1)));
|
|
3876 |
sub(cnt2, zr, cnt2, LSL, 1);
|
|
3877 |
|
|
3878 |
// Loop, loading longwords and comparing them into rscratch2.
|
|
3879 |
bind(NEXT_WORD);
|
|
3880 |
ldr(result, Address(str1, cnt2));
|
|
3881 |
ldr(cnt1, Address(str2, cnt2));
|
|
3882 |
adds(cnt2, cnt2, wordSize);
|
|
3883 |
eor(rscratch2, result, cnt1);
|
|
3884 |
cbnz(rscratch2, DIFFERENCE);
|
|
3885 |
br(Assembler::LT, NEXT_WORD);
|
|
3886 |
|
|
3887 |
// Last longword. In the case where length == 4 we compare the
|
|
3888 |
// same longword twice, but that's still faster than another
|
|
3889 |
// conditional branch.
|
|
3890 |
|
|
3891 |
ldr(result, Address(str1));
|
|
3892 |
ldr(cnt1, Address(str2));
|
|
3893 |
eor(rscratch2, result, cnt1);
|
|
3894 |
cbz(rscratch2, LENGTH_DIFF);
|
|
3895 |
|
|
3896 |
// Find the first different characters in the longwords and
|
|
3897 |
// compute their difference.
|
|
3898 |
bind(DIFFERENCE);
|
|
3899 |
rev(rscratch2, rscratch2);
|
|
3900 |
clz(rscratch2, rscratch2);
|
|
3901 |
andr(rscratch2, rscratch2, -16);
|
|
3902 |
lsrv(result, result, rscratch2);
|
|
3903 |
uxthw(result, result);
|
|
3904 |
lsrv(cnt1, cnt1, rscratch2);
|
|
3905 |
uxthw(cnt1, cnt1);
|
|
3906 |
subw(result, result, cnt1);
|
|
3907 |
b(DONE);
|
|
3908 |
}
|
|
3909 |
|
|
3910 |
bind(SHORT_STRING);
|
|
3911 |
// Is the minimum length zero?
|
|
3912 |
cbz(cnt2, LENGTH_DIFF);
|
|
3913 |
|
|
3914 |
bind(SHORT_LOOP);
|
|
3915 |
load_unsigned_short(result, Address(post(str1, 2)));
|
|
3916 |
load_unsigned_short(cnt1, Address(post(str2, 2)));
|
|
3917 |
subw(result, result, cnt1);
|
|
3918 |
cbnz(result, DONE);
|
|
3919 |
sub(cnt2, cnt2, 1);
|
|
3920 |
cbnz(cnt2, SHORT_LOOP);
|
|
3921 |
|
|
3922 |
// Strings are equal up to min length. Return the length difference.
|
|
3923 |
bind(LENGTH_DIFF);
|
|
3924 |
mov(result, tmp1);
|
|
3925 |
|
|
3926 |
// That's it
|
|
3927 |
bind(DONE);
|
|
3928 |
|
|
3929 |
BLOCK_COMMENT("} string_compare");
|
|
3930 |
}
|
|
3931 |
|
|
3932 |
|
|
3933 |
void MacroAssembler::string_equals(Register str1, Register str2,
|
|
3934 |
Register cnt, Register result,
|
|
3935 |
Register tmp1) {
|
|
3936 |
Label SAME_CHARS, DONE, SHORT_LOOP, SHORT_STRING,
|
|
3937 |
NEXT_WORD;
|
|
3938 |
|
|
3939 |
const Register tmp2 = rscratch1;
|
|
3940 |
assert_different_registers(str1, str2, cnt, result, tmp1, tmp2, rscratch2);
|
|
3941 |
|
|
3942 |
BLOCK_COMMENT("string_equals {");
|
|
3943 |
|
|
3944 |
// Start by assuming that the strings are not equal.
|
|
3945 |
mov(result, zr);
|
|
3946 |
|
|
3947 |
// A very short string
|
|
3948 |
cmpw(cnt, 4);
|
|
3949 |
br(Assembler::LT, SHORT_STRING);
|
|
3950 |
|
|
3951 |
// Check if the strings start at the same location.
|
|
3952 |
cmp(str1, str2);
|
|
3953 |
br(Assembler::EQ, SAME_CHARS);
|
|
3954 |
|
|
3955 |
// Compare longwords
|
|
3956 |
{
|
|
3957 |
subw(cnt, cnt, 4); // The last longword is a special case
|
|
3958 |
|
|
3959 |
// Move both string pointers to the last longword of their
|
|
3960 |
// strings, negate the remaining count, and convert it to bytes.
|
|
3961 |
lea(str1, Address(str1, cnt, Address::uxtw(1)));
|
|
3962 |
lea(str2, Address(str2, cnt, Address::uxtw(1)));
|
|
3963 |
sub(cnt, zr, cnt, LSL, 1);
|
|
3964 |
|
|
3965 |
// Loop, loading longwords and comparing them into rscratch2.
|
|
3966 |
bind(NEXT_WORD);
|
|
3967 |
ldr(tmp1, Address(str1, cnt));
|
|
3968 |
ldr(tmp2, Address(str2, cnt));
|
|
3969 |
adds(cnt, cnt, wordSize);
|
|
3970 |
eor(rscratch2, tmp1, tmp2);
|
|
3971 |
cbnz(rscratch2, DONE);
|
|
3972 |
br(Assembler::LT, NEXT_WORD);
|
|
3973 |
|
|
3974 |
// Last longword. In the case where length == 4 we compare the
|
|
3975 |
// same longword twice, but that's still faster than another
|
|
3976 |
// conditional branch.
|
|
3977 |
|
|
3978 |
ldr(tmp1, Address(str1));
|
|
3979 |
ldr(tmp2, Address(str2));
|
|
3980 |
eor(rscratch2, tmp1, tmp2);
|
|
3981 |
cbz(rscratch2, SAME_CHARS);
|
|
3982 |
b(DONE);
|
|
3983 |
}
|
|
3984 |
|
|
3985 |
bind(SHORT_STRING);
|
|
3986 |
// Is the length zero?
|
|
3987 |
cbz(cnt, SAME_CHARS);
|
|
3988 |
|
|
3989 |
bind(SHORT_LOOP);
|
|
3990 |
load_unsigned_short(tmp1, Address(post(str1, 2)));
|
|
3991 |
load_unsigned_short(tmp2, Address(post(str2, 2)));
|
|
3992 |
subw(tmp1, tmp1, tmp2);
|
|
3993 |
cbnz(tmp1, DONE);
|
|
3994 |
sub(cnt, cnt, 1);
|
|
3995 |
cbnz(cnt, SHORT_LOOP);
|
|
3996 |
|
|
3997 |
// Strings are equal.
|
|
3998 |
bind(SAME_CHARS);
|
|
3999 |
mov(result, true);
|
|
4000 |
|
|
4001 |
// That's it
|
|
4002 |
bind(DONE);
|
|
4003 |
|
|
4004 |
BLOCK_COMMENT("} string_equals");
|
|
4005 |
}
|
|
4006 |
|
|
4007 |
// Compare char[] arrays aligned to 4 bytes
|
|
4008 |
void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
|
|
4009 |
Register result, Register tmp1)
|
|
4010 |
{
|
|
4011 |
Register cnt1 = rscratch1;
|
|
4012 |
Register cnt2 = rscratch2;
|
|
4013 |
Register tmp2 = rscratch2;
|
|
4014 |
|
|
4015 |
Label SAME, DIFFER, NEXT, TAIL03, TAIL01;
|
|
4016 |
|
|
4017 |
int length_offset = arrayOopDesc::length_offset_in_bytes();
|
|
4018 |
int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
|
|
4019 |
|
|
4020 |
BLOCK_COMMENT("char_arrays_equals {");
|
|
4021 |
|
|
4022 |
// different until proven equal
|
|
4023 |
mov(result, false);
|
|
4024 |
|
|
4025 |
// same array?
|
|
4026 |
cmp(ary1, ary2);
|
|
4027 |
br(Assembler::EQ, SAME);
|
|
4028 |
|
|
4029 |
// ne if either null
|
|
4030 |
cbz(ary1, DIFFER);
|
|
4031 |
cbz(ary2, DIFFER);
|
|
4032 |
|
|
4033 |
// lengths ne?
|
|
4034 |
ldrw(cnt1, Address(ary1, length_offset));
|
|
4035 |
ldrw(cnt2, Address(ary2, length_offset));
|
|
4036 |
cmp(cnt1, cnt2);
|
|
4037 |
br(Assembler::NE, DIFFER);
|
|
4038 |
|
|
4039 |
lea(ary1, Address(ary1, base_offset));
|
|
4040 |
lea(ary2, Address(ary2, base_offset));
|
|
4041 |
|
|
4042 |
subs(cnt1, cnt1, 4);
|
|
4043 |
br(LT, TAIL03);
|
|
4044 |
|
|
4045 |
BIND(NEXT);
|
|
4046 |
ldr(tmp1, Address(post(ary1, 8)));
|
|
4047 |
ldr(tmp2, Address(post(ary2, 8)));
|
|
4048 |
subs(cnt1, cnt1, 4);
|
|
4049 |
eor(tmp1, tmp1, tmp2);
|
|
4050 |
cbnz(tmp1, DIFFER);
|
|
4051 |
br(GE, NEXT);
|
|
4052 |
|
|
4053 |
BIND(TAIL03); // 0-3 chars left, cnt1 = #chars left - 4
|
|
4054 |
tst(cnt1, 0b10);
|
|
4055 |
br(EQ, TAIL01);
|
|
4056 |
ldrw(tmp1, Address(post(ary1, 4)));
|
|
4057 |
ldrw(tmp2, Address(post(ary2, 4)));
|
|
4058 |
cmp(tmp1, tmp2);
|
|
4059 |
br(NE, DIFFER);
|
|
4060 |
BIND(TAIL01); // 0-1 chars left
|
|
4061 |
tst(cnt1, 0b01);
|
|
4062 |
br(EQ, SAME);
|
|
4063 |
ldrh(tmp1, ary1);
|
|
4064 |
ldrh(tmp2, ary2);
|
|
4065 |
cmp(tmp1, tmp2);
|
|
4066 |
br(NE, DIFFER);
|
|
4067 |
|
|
4068 |
BIND(SAME);
|
|
4069 |
mov(result, true);
|
|
4070 |
BIND(DIFFER); // result already set
|
|
4071 |
|
|
4072 |
BLOCK_COMMENT("} char_arrays_equals");
|
|
4073 |
}
|
|
4074 |
|
|
4075 |
// encode char[] to byte[] in ISO_8859_1
|
|
4076 |
void MacroAssembler::encode_iso_array(Register src, Register dst,
|
|
4077 |
Register len, Register result,
|
|
4078 |
FloatRegister Vtmp1, FloatRegister Vtmp2,
|
|
4079 |
FloatRegister Vtmp3, FloatRegister Vtmp4)
|
|
4080 |
{
|
|
4081 |
Label DONE, NEXT_32, LOOP_8, NEXT_8, LOOP_1, NEXT_1;
|
|
4082 |
Register tmp1 = rscratch1;
|
|
4083 |
|
|
4084 |
mov(result, len); // Save initial len
|
|
4085 |
|
|
4086 |
#ifndef BUILTIN_SIM
|
|
4087 |
subs(len, len, 32);
|
|
4088 |
br(LT, LOOP_8);
|
|
4089 |
|
|
4090 |
// The following code uses the SIMD 'uqxtn' and 'uqxtn2' instructions
|
|
4091 |
// to convert chars to bytes. These set the 'QC' bit in the FPSR if
|
|
4092 |
// any char could not fit in a byte, so clear the FPSR so we can test it.
|
|
4093 |
clear_fpsr();
|
|
4094 |
|
|
4095 |
BIND(NEXT_32);
|
|
4096 |
ld1(Vtmp1, Vtmp2, Vtmp3, Vtmp4, T8H, src);
|
|
4097 |
uqxtn(Vtmp1, T8B, Vtmp1, T8H); // uqxtn - write bottom half
|
|
4098 |
uqxtn(Vtmp1, T16B, Vtmp2, T8H); // uqxtn2 - write top half
|
|
4099 |
uqxtn(Vtmp2, T8B, Vtmp3, T8H);
|
|
4100 |
uqxtn(Vtmp2, T16B, Vtmp4, T8H); // uqxtn2
|
|
4101 |
get_fpsr(tmp1);
|
|
4102 |
cbnzw(tmp1, LOOP_8);
|
|
4103 |
st1(Vtmp1, Vtmp2, T16B, post(dst, 32));
|
|
4104 |
subs(len, len, 32);
|
|
4105 |
add(src, src, 64);
|
|
4106 |
br(GE, NEXT_32);
|
|
4107 |
|
|
4108 |
BIND(LOOP_8);
|
|
4109 |
adds(len, len, 32-8);
|
|
4110 |
br(LT, LOOP_1);
|
|
4111 |
clear_fpsr(); // QC may be set from loop above, clear again
|
|
4112 |
BIND(NEXT_8);
|
|
4113 |
ld1(Vtmp1, T8H, src);
|
|
4114 |
uqxtn(Vtmp1, T8B, Vtmp1, T8H);
|
|
4115 |
get_fpsr(tmp1);
|
|
4116 |
cbnzw(tmp1, LOOP_1);
|
|
4117 |
st1(Vtmp1, T8B, post(dst, 8));
|
|
4118 |
subs(len, len, 8);
|
|
4119 |
add(src, src, 16);
|
|
4120 |
br(GE, NEXT_8);
|
|
4121 |
|
|
4122 |
BIND(LOOP_1);
|
|
4123 |
adds(len, len, 8);
|
|
4124 |
br(LE, DONE);
|
|
4125 |
#else
|
|
4126 |
cbz(len, DONE);
|
|
4127 |
#endif
|
|
4128 |
BIND(NEXT_1);
|
|
4129 |
ldrh(tmp1, Address(post(src, 2)));
|
|
4130 |
tst(tmp1, 0xff00);
|
|
4131 |
br(NE, DONE);
|
|
4132 |
strb(tmp1, Address(post(dst, 1)));
|
|
4133 |
subs(len, len, 1);
|
|
4134 |
br(GT, NEXT_1);
|
|
4135 |
|
|
4136 |
BIND(DONE);
|
|
4137 |
sub(result, result, len); // Return index where we stopped
|
|
4138 |
}
|