/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledIC.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeCall::verify() {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeLoadGot::report_and_fail() const {
  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  fatal("not a indirect rip mov to rbx");
}

void NativeLoadGot::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

address NativeLoadGot::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

intptr_t NativeLoadGot::data() const {
  return *(intptr_t *) got_address();
}

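// The NativePltCall helpers below navigate PLT call sites.  As the comments
// in plt_jump() and plt_load_got() note, a virtual call site starts directly
// with the GOT jump, while a static call site starts with the GOT load used
// by the c2i stub, so each helper first probes which of the two shapes it is
// looking at.
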
address NativePltCall::destination() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  return *(address*)MacroAssembler::target_addr_for_insn((address)jump);
}

address NativePltCall::plt_entry() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativePltCall::plt_jump() const {
  address entry = plt_entry();
  // Virtual PLT code has move instruction first
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    return nativeLoadGot_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_load_got() const {
  address entry = plt_entry();
  if (!((NativeGotJump*)entry)->is_GotJump()) {
    // Virtual PLT code has move instruction first
    return entry;
  } else {
    // Static PLT code has move instruction second (from c2i stub)
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should be called only for static calls which have a C2I stub.
  NativeLoadGot* load = nativeLoadGot_at(entry);
  return entry;
}

address NativePltCall::plt_resolve_call() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address entry = jump->next_instruction_address();
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    // c2i stub 2 instructions
    entry = nativeLoadGot_at(entry)->next_instruction_address();
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

void NativePltCall::reset_to_plt_resolve_call() {
  set_destination_mt_safe(plt_resolve_call());
}

void NativePltCall::set_destination_mt_safe(address dest) {
  // Rewriting the value in the GOT; it should always be aligned.
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address* got = (address *) jump->got_address();
  *got = dest;
}

void NativePltCall::set_stub_to_clean() {
  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
  NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
  method_loader->set_data(0);
  jump->set_jump_destination((address)-1);
}

void NativePltCall::verify() const {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

address NativeGotJump::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativeGotJump::destination() const {
  address *got_entry = (address *) got_address();
  return *got_entry;
}

bool NativeGotJump::is_GotJump() const {
  NativeInstruction *insn =
    nativeInstruction_at(addr_at(3 * NativeInstruction::instruction_size));
  return insn->encoding() == 0xd61f0200; // br x16
}

void NativeGotJump::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

address NativeCall::destination() const {
  address addr = (address)this;
  address destination = instruction_address() + displacement();

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);  // Else we get an assertion failure if the nmethod is a zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// The assert_lock parameter switches off the assertion during code
// generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
         CompiledICLocker::is_safe(addr_at(0)),
         "concurrent code patching");

  ResourceMark rm;
  int code_size = NativeInstruction::instruction_size;
  address addr_call = addr_at(0);
  bool reachable = Assembler::reachable_from_branch_at(addr_call, dest);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert(!is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  if (reachable) {
    set_destination(dest);
  } else {
    assert(trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

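// Find the trampoline stub for this call, if any: either the branch already
// targets a trampoline inside the same code blob, or the stub is located via
// the trampoline_stub relocation recorded in the nmethod.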
address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address bl_destination
    = MacroAssembler::pd_call_destination(call_addr);
  if (code->contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  if (code->is_nmethod()) {
    return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
  }

  return NULL;
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  if (! (nativeInstruction_at(instruction_address())->is_movz() ||
         is_adrp_at(instruction_address()) ||
         is_ldr_literal_at(instruction_address())) ) {
    fatal("should be MOVZ or ADRP or LDR (literal)");
  }
}


intptr_t NativeMovConstReg::data() const {
  // das(uint64_t(instruction_address()),2);
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in the oops section.
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop* oop_addr = iter.oop_reloc()->oop_addr();
        *oop_addr = cast_to_oop(x);
        break;
      } else if (iter.type() == relocInfo::metadata_type) {
        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
        *metadata_addr = (Metadata*)x;
        break;
      }
    }
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

address NativeMovRegMem::instruction_address() const { return addr_at(instruction_offset); }

int NativeMovRegMem::offset() const {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(instruction_address());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (maybe_cpool_ref(pc)) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    *(long*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}

//--------------------------------------------------------------------------------

void NativeJump::verify() { ; }


void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about

  // return -1 if jump to self
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
};

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about

  // return -1 if jump to self
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}

void NativeGeneralJump::set_jump_destination(address dest) {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1) {
    dest = instruction_address();
  }

  move->set_data((uintptr_t) dest);
};

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // a safepoint_poll is implemented in two steps as either
  //
  // adrp(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // mov(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // ldr(reg, [rthread, #offset]);
  // ldr(zr, [reg, #offset]);
  //
  // however, we cannot rely on the polling page address load always
  // directly preceding the read from the page.  C1 does that but C2
  // has to do the load and read as two independent instruction
  // generation steps.  that's because with a single macro sequence the
  // generic C2 code can only add the oop map before the mov/adrp and
  // the trap handler expects an oop map to be associated with the
  // load.  with the load scheduled as a prior step the oop map goes
  // where it is needed.
  //
  // so all we can do here is check that the marked instruction is a
  // load word to zr
  return is_ldrw_to_zr(address(this));
}

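// The predicates below decode raw instruction words with
// Instruction_aarch64::extract(insn, msb, lsb).  is_adrp_at checks the ADRP
// opcode bits (op = 1, bits 28:24 = 10000) while masking out the immlo field;
// is_ldr_literal_at matches the fixed bits of the LDR (literal) group; and
// is_ldrw_to_zr matches an unsigned-offset LDR of a W register whose
// destination register is zr.
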
bool NativeInstruction::is_adrp_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 24) & 0b10011111) == 0b10010000;
}

bool NativeInstruction::is_ldr_literal_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000;
}

bool NativeInstruction::is_ldrw_to_zr(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
          Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}

bool NativeInstruction::is_general_jump() { |
426 |
if (is_movz()) { |
|
427 |
NativeInstruction* inst1 = nativeInstruction_at(addr_at(instruction_size * 1)); |
|
428 |
if (inst1->is_movk()) { |
|
429 |
NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2)); |
|
430 |
if (inst2->is_movk()) { |
|
431 |
NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3)); |
|
432 |
if (inst3->is_blr()) { |
|
433 |
return true; |
|
434 |
} |
|
435 |
} |
|
436 |
} |
|
437 |
} |
|
438 |
return false; |
|
439 |
} |
|
440 |
||
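// MOVZ and MOVK share the move wide immediate layout: bits 28:23 carry the
// fixed pattern 100101 and bits 30:29 (opc) select the variant, 10 for MOVZ
// and 11 for MOVK; the sf bit and the immediate/shift fields are ignored here.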
bool NativeInstruction::is_movz() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
}

bool NativeInstruction::is_movk() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");

#ifdef ASSERT
  // This may be the temporary nmethod generated while we're AOT
  // compiling.  Such an nmethod doesn't begin with a NOP but with an ADRP.
  if (! (CalculateClassFingerprint && UseAOT && is_adrp_at(verified_entry))) {
    assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
           || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
           "Aarch64 cannot replace non-jump with jump");
  }
#endif

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t disp = dest - verified_entry;
    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");

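    // 0b000101 in bits 31:26 is the unconditional B opcode and bits 25:0 hold
    // the signed word offset, so the whole branch is published with a single
    // aligned 32-bit store.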
    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

void NativeGeneralJump::verify() { }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.movptr(rscratch1, (uintptr_t)entry);
  a.br(rscratch1);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}

address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

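// The release barrier orders the store of the new destination before any
// subsequent patching of the call instruction itself (see
// NativeCall::set_destination_mt_safe, which updates the trampoline first),
// so a thread routed through the trampoline sees the new target.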
void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}

// Generate a trampoline for a branch to dest.  If there's no need for a
// trampoline, simply patch the call directly to dest.
address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
  MacroAssembler a(&cbuf);
  address stub = NULL;

  if (a.far_branches()
      && ! is_NativeCallTrampolineStub_at(instruction_address() + displacement())) {
    stub = a.emit_trampoline_stub(instruction_address() - cbuf.insts()->start(), dest);
  }

  if (stub == NULL) {
    // If we generated no stub, patch this call directly to dest.
    // This will happen if we don't need far branches or if there
    // already was a trampoline.
    set_destination(dest);
  }

  return stub;
}