author       aph
date         Mon, 14 May 2018 12:03:59 +0100
changeset    50104 4ea7917929b9
parent       49871 3325ee1c0fc4
child        52384 d6dc479bcdd3
permissions  -rw-r--r--
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeCall::verify() {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeLoadGot::report_and_fail() const {
  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  fatal("not an indirect rip mov to rbx");
}

void NativeLoadGot::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

address NativeLoadGot::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

intptr_t NativeLoadGot::data() const {
  return *(intptr_t *) got_address();
}

address NativePltCall::destination() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  return *(address*)MacroAssembler::target_addr_for_insn((address)jump);
}

address NativePltCall::plt_entry() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

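// Rough shape of the PLT entries handled below (inferred from the
// accessors, not a normative layout):
//
//   virtual call:                  static call:
//     GOT load  (method move)        GOT jump
//     GOT jump                       GOT load  (c2i method loader)
//                                    GOT jump
//
// plt_jump() returns the GOT jump and plt_load_got() the GOT load,
// whichever position each occupies.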
address NativePltCall::plt_jump() const {
  address entry = plt_entry();
  // Virtual PLT code has move instruction first
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    return nativeLoadGot_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_load_got() const {
  address entry = plt_entry();
  if (!((NativeGotJump*)entry)->is_GotJump()) {
    // Virtual PLT code has move instruction first
    return entry;
  } else {
    // Static PLT code has move instruction second (from c2i stub)
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should be called only for static calls, which have a C2I stub.
  NativeLoadGot* load = nativeLoadGot_at(entry);  // side effect: verifies that entry is a NativeLoadGot
  return entry;
}

address NativePltCall::plt_resolve_call() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address entry = jump->next_instruction_address();
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    // c2i stub 2 instructions
    entry = nativeLoadGot_at(entry)->next_instruction_address();
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

void NativePltCall::reset_to_plt_resolve_call() {
  set_destination_mt_safe(plt_resolve_call());
}

void NativePltCall::set_destination_mt_safe(address dest) {
  // We are rewriting the value in the GOT; it should always be aligned.
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address* got = (address *) jump->got_address();
  *got = dest;
}

void NativePltCall::set_stub_to_clean() {
  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
  NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
  method_loader->set_data(0);
  jump->set_jump_destination((address)-1);
}

void NativePltCall::verify() const {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

address NativeGotJump::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativeGotJump::destination() const {
  address *got_entry = (address *) got_address();
  return *got_entry;
}

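// Heuristic: a GOT jump is recognized by the "br x16" (encoding
// 0xd61f0200) expected in its fourth instruction slot; anything else
// there (e.g. part of a method-loader sequence) means this is not a
// GOT jump.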
bool NativeGotJump::is_GotJump() const {
  NativeInstruction *insn =
    nativeInstruction_at(addr_at(3 * NativeInstruction::instruction_size));
  return insn->encoding() == 0xd61f0200; // br x16
}

void NativeGotJump::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

address NativeCall::destination() const {
  address addr = (address)this;
  address destination = instruction_address() + displacement();

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get an assert if the nmethod is a zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}
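
// Background: a direct AArch64 "bl" carries a signed 26-bit word offset,
// so it can only reach +/-128 MB. Calls that may need to go farther are
// routed through a trampoline stub in the nmethod's stub area, which
// holds the full 64-bit destination.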

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// Add parameter assert_lock to switch off assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()),
         "concurrent code patching");

  ResourceMark rm;
  int code_size = NativeInstruction::instruction_size;
  address addr_call = addr_at(0);
  bool reachable = Assembler::reachable_from_branch_at(addr_call, dest);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert (! is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  if (reachable) {
    set_destination(dest);
  } else {
    assert (trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}
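
// Note on MT-safety: the call itself is a single, naturally aligned
// 32-bit instruction, so free-running threads observe either the old or
// the new branch, never a torn mix. The trampoline constant is patched
// before the branch so that a thread taking the new path always finds a
// valid destination.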

address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address bl_destination
    = MacroAssembler::pd_call_destination(call_addr);
  if (code->contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  if (code->is_nmethod()) {
    return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
  }

  return NULL;
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  // make sure code pattern is actually mov reg64, imm64 instructions
}

intptr_t NativeMovConstReg::data() const {
  // das(uint64_t(instruction_address()),2);
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in oops section.
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop* oop_addr = iter.oop_reloc()->oop_addr();
        *oop_addr = cast_to_oop(x);
        break;
      } else if (iter.type() == relocInfo::metadata_type) {
        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
        *metadata_addr = (Metadata*)x;
        break;
      }
    }
  }
}
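
// For orientation (a sketch, not a normative layout): a NativeMovConstReg
// is either a constant-pool reference, e.g.
//   ldr  reg, <pc-relative literal>                   // patched via the pool slot
// or an inline immediate sequence, e.g.
//   movz reg, #...; movk reg, #...; movk reg, #...    // patched in place
// which is why data() and set_data() above dispatch on maybe_cpool_ref().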

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

address NativeMovRegMem::instruction_address() const { return addr_at(instruction_offset); }

int NativeMovRegMem::offset() const {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(instruction_address());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (maybe_cpool_ref(pc)) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    *(long*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}

//--------------------------------------------------------------------------------

void NativeJump::verify() { ; }

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}

address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about

  // return -1 if jump to self
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about

  // return -1 if jump to self
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}

void NativeGeneralJump::set_jump_destination(address dest) {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1) {
    dest = instruction_address();
  }

  move->set_data((uintptr_t) dest);
}

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // a safepoint_poll is implemented in two steps as either
  //
  //   adrp(reg, polling_page);
  //   ldr(zr, [reg, #offset]);
  //
  // or
  //
  //   mov(reg, polling_page);
  //   ldr(zr, [reg, #offset]);
  //
  // or
  //
  //   ldr(reg, [rthread, #offset]);
  //   ldr(zr, [reg, #offset]);
  //
  // however, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that but C2
  // has to do the load and read as two independent instruction
  // generation steps. that's because with a single macro sequence the
  // generic C2 code can only add the oop map before the mov/adrp and
  // the trap handler expects an oop map to be associated with the
  // load. with the load scheduled as a prior step the oop map goes
  // where it is needed.
  //
  // so all we can do here is check that the marked instruction is a
  // load word to zr
  return is_ldrw_to_zr(address(this));
}

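// ADRP encodes as op|immlo(2)|10000 in bits 31:24; the 0b10011111 mask
// below ignores the two immlo bits.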
bool NativeInstruction::is_adrp_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 24) & 0b10011111) == 0b10010000;
}

bool NativeInstruction::is_ldr_literal_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000;
}

bool NativeInstruction::is_ldrw_to_zr(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
          Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}

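// A "general jump" is the materialize-and-branch pattern emitted by
// NativeGeneralJump::insert_unconditional() below: movz; movk; movk;
// then a register-indirect branch (br/blr, matched by is_blr()).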
bool NativeInstruction::is_general_jump() {
  if (is_movz()) {
    NativeInstruction* inst1 = nativeInstruction_at(addr_at(instruction_size * 1));
    if (inst1->is_movk()) {
      NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2));
      if (inst2->is_movk()) {
        NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3));
        if (inst3->is_blr()) {
          return true;
        }
      }
    }
  }
  return false;
}

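// MOVZ encodes as sf|10|100101|... and MOVK as sf|11|100101|..., so bits
// 30:23 distinguish the two regardless of register width.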
bool NativeInstruction::is_movz() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
}

bool NativeInstruction::is_movk() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");

#ifdef ASSERT
  // This may be the temporary nmethod generated while we're AOT
  // compiling.  Such an nmethod doesn't begin with a NOP but with an ADRP.
  if (! (CalculateClassFingerprint && UseAOT && is_adrp_at(verified_entry))) {
    assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
           || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
           "Aarch64 cannot replace non-jump with jump");
  }
#endif

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t disp = dest - verified_entry;
    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");

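    // Unconditional branch: opcode 0b000101 in bits 31:26, signed word
    // offset (disp / 4) in imm26.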
    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

void NativeGeneralJump::verify() { }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.movptr(rscratch1, (uintptr_t)entry);
  a.br(rscratch1);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}

address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}
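
// The release fence in set_destination is intended to make the stored
// destination visible to other threads before the caller subsequently
// publishes the branch that targets this stub.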

// Generate a trampoline for a branch to dest.  If there's no need for a
// trampoline, simply patch the call directly to dest.
address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
  MacroAssembler a(&cbuf);
  address stub = NULL;

  if (a.far_branches()
      && ! is_NativeCallTrampolineStub_at(instruction_address() + displacement())) {
    stub = a.emit_trampoline_stub(instruction_address() - cbuf.insts()->start(), dest);
  }

  if (stub == NULL) {
    // If we generated no stub, patch this call directly to dest.
    // This will happen if we don't need far branches or if there
    // already was a trampoline.
    set_destination(dest);
  }

  return stub;
}