/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeCall::verify() { ; }

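// A direct "bl" on AArch64 reaches only +/-128 MB, so a call whose target
// may lie farther away is routed through a trampoline stub in the
// nmethod's stub area; the stub loads a 64-bit destination kept in a
// pointer slot beside it and branches there. Roughly (an illustrative
// sketch, not the authoritative stub layout):
//
//      bl <trampoline>          // patched call instruction
//      ...
//    trampoline:
//      ldr xscratch, target     // load the full 64-bit destination
//      br  xscratch
//    target:
//      .quad <destination>
//
// destination() undoes this indirection when it is in use.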
address NativeCall::destination() const {
  address addr = (address)this;
  address destination = instruction_address() + displacement();

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get an assert if the nmethod is a zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// The parameter assert_lock switches off the assertion during code
// generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()),
         "concurrent code patching");

  ResourceMark rm;
  int code_size = NativeInstruction::instruction_size;
  address addr_call = addr_at(0);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert(!is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  if (Assembler::reachable_from_branch_at(addr_call, dest)) {
    set_destination(dest);
  } else {
    assert(trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}
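
// The order above is what keeps this MT-safe: the trampoline's data slot
// is rewritten before the "bl" itself is redirected, so a concurrently
// executing thread sees either the old destination or a trampoline that
// already carries the new one. The branch patch itself is a single
// aligned 32-bit store, which AArch64 performs atomically.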
|
address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address bl_destination
    = MacroAssembler::pd_call_destination(call_addr);
  if (code->contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  // If the codeBlob is not an nmethod, this is because we get here from the
  // CodeBlob constructor, which is called within the nmethod constructor.
  return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
}
|
// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }
|
//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  // make sure code pattern is actually mov reg64, imm64 instructions
}
|
intptr_t NativeMovConstReg::data() const {
  // das(uint64_t(instruction_address()),2);
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}
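
// data() distinguishes the two materialization patterns used for a
// 64-bit constant: a pc-relative load from a constant-pool slot
// (maybe_cpool_ref), where the value must be read from the resolved
// address, and a movz/movk immediate sequence, where the resolved
// address itself is the value.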
|
void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}
|
//-------------------------------------------------------------------

address NativeMovRegMem::instruction_address() const { return addr_at(instruction_offset); }

int NativeMovRegMem::offset() const {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(instruction_address());
  }
}
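
// The test on bits 28..24 above (0b10000) matches the pc-relative
// addressing group, i.e. adr/adrp: the case where the offset lives in a
// nearby memory slot and has to be loaded rather than decoded from the
// instruction itself.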
|
void NativeMovRegMem::set_offset(int x) {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (maybe_cpool_ref(pc)) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    *(long*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}
|
void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}
|
//--------------------------------------------------------------------------------

void NativeJump::verify() { ; }

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}

address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.

  // Return -1 if this is a jump to self.
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}
|
void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}
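
// A note on the ICache::invalidate_range calls in the patching code
// here: AArch64 does not keep the instruction cache coherent with data
// stores, so every range of rewritten instruction words must be flushed
// before the new code can safely execute.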
|
//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.

  // Return -1 if this is a jump to self.
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}
|
void NativeGeneralJump::set_jump_destination(address dest) {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  if (dest == (address) -1) {
    dest = instruction_address();
  }

  move->set_data((uintptr_t) dest);
}
|
//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // a safepoint_poll is implemented in two steps as either
  //
  //   adrp(reg, polling_page);
  //   ldr(zr, [reg, #offset]);
  //
  // or
  //
  //   mov(reg, polling_page);
  //   ldr(zr, [reg, #offset]);
  //
  // however, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that but C2
  // has to do the load and read as two independent instruction
  // generation steps. that's because with a single macro sequence the
  // generic C2 code can only add the oop map before the mov/adrp and
  // the trap handler expects an oop map to be associated with the
  // load. with the load scheduled as a prior step the oop map goes
  // where it is needed.
  //
  // so all we can do here is check that the marked instruction is a
  // load word to zr
  return is_ldrw_to_zr(address(this));
}
|
bool NativeInstruction::is_adrp_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 24) & 0b10011111) == 0b10010000;
}

bool NativeInstruction::is_ldr_literal_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000;
}

bool NativeInstruction::is_ldrw_to_zr(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
          Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}
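
// Encoding notes for the three predicates above (A64 ISA):
//  - adrp:        bit 31 == 1 and bits 28..24 == 10000 (bit 31 == 0 would
//                 be adr); bits 30..29 hold immlo and are masked out.
//  - ldr literal: bits 28..27 == 11 and bits 25..24 == 00; bit 29 and the
//                 SIMD&FP bit 26 are masked out.
//  - ldrw to zr:  bits 31..22 == 1011100101 select a 32-bit
//                 "ldr wN, [xM, #imm]" and Rt == 0b11111 selects wzr.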
|
bool NativeInstruction::is_general_jump() {
  if (is_movz()) {
    NativeInstruction* inst1 = nativeInstruction_at(addr_at(instruction_size * 1));
    if (inst1->is_movk()) {
      NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2));
      if (inst2->is_movk()) {
        NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3));
        if (inst3->is_blr()) {
          return true;
        }
      }
    }
  }
  return false;
}
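
// The shape recognized above is the far jump emitted as a general jump:
// a movz/movk/movk sequence building the target address 16 bits at a
// time, followed by a register branch:
//
//   movz reg, #imm16              // target bits 0..15
//   movk reg, #imm16, lsl #16     // target bits 16..31
//   movk reg, #imm16, lsl #32     // target bits 32..47
//   blr  reg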
|
bool NativeInstruction::is_movz() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
}

bool NativeInstruction::is_movk() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
}
|
void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}
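
// dcps1 is a debug-state exception instruction; executed outside debug
// state it is undefined and raises SIGILL, and the 0xdead immediate makes
// the word easy to recognize in is_sigill_zombie_not_entrant().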
|
//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
         || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
         "Aarch64 cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t disp = dest - verified_entry;
    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");

    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}
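
// The hand-assembled word above is an unconditional "b <dest>": opcode
// 000101 in bits 31..26 and a signed 26-bit word offset in bits 25..0,
// which is exactly the +/-128 MB range enforced by the guarantee.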
|
void NativeGeneralJump::verify() { }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.mov(rscratch1, entry);
  a.br(rscratch1);

  ICache::invalidate_range(code_pos, instruction_size);
}
|
// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}

address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}