/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeCall::verify() { ; }

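// Return the resolved destination of this call. An AArch64 bl can only
// reach +/-128 MB, so a far call is routed through a trampoline stub in
// the nmethod's stub area; in that case the real target is the 64-bit
// address stored in the stub, not the bl's own branch target.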
address NativeCall::destination() const {
  address addr = (address)this;
  address destination = instruction_address() + displacement();

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Otherwise we get an assertion if the nmethod is a zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// Add parameter assert_lock to switch off the assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()),
         "concurrent code patching");

  ResourceMark rm;
  int code_size = NativeInstruction::instruction_size;
  address addr_call = addr_at(0);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert (! is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  if (Assembler::reachable_from_branch_at(addr_call, dest)) {
    set_destination(dest);
  } else {
    assert (trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

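// Return the address of this call's trampoline stub, or NULL if the call
// site has none. The stub is found either by following the bl directly
// (when it already branches into the stub area) or via the
// trampoline_stub_Relocation recorded for this call site.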
address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address bl_destination
    = MacroAssembler::pd_call_destination(call_addr);
  if (code->content_contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  // If the CodeBlob is not an nmethod, that is because we get here from the
  // CodeBlob constructor, which is called within the nmethod constructor.
  return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  // make sure code pattern is actually mov reg64, imm64 instructions
}

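// A "mov reg64, imm64" is materialized either as a literal load from the
// constant pool (adrp; ldr) or as an inline immediate sequence (movz/movk
// or adrp; add). In the constant-pool case the datum is the 64-bit value
// stored in the pool slot; otherwise it is the address the instruction
// sequence itself encodes.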
intptr_t NativeMovConstReg::data() const {
  // das(uint64_t(instruction_address()),2);
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

address NativeMovRegMem::instruction_address() const { return addr_at(instruction_offset); }

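// For NativeMovRegMem the offset is either stored out of line and reached
// through a PC-relative adr/adrp sequence (bits 28..24 of the first
// instruction are 0b10000 for PC-relative addressing), or it is encoded
// directly in the instruction's immediate fields, in which case the decoded
// target address is the offset itself.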
int NativeMovRegMem::offset() const {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(instruction_address());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (maybe_cpool_ref(pc)) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    *(long*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}

//--------------------------------------------------------------------------------

void NativeJump::verify() { ; }

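// Nothing to check on AArch64: patch_verified_entry below replaces a single,
// naturally aligned 32-bit instruction, so no extra entry-point alignment
// appears to be required.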
void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about

  // return -1 if jump to self
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // A safepoint_poll is implemented in two steps as either
  //
  //   adrp(reg, polling_page);
  //   ldr(zr, [reg, #offset]);
  //
  // or
  //
  //   mov(reg, polling_page);
  //   ldr(zr, [reg, #offset]);
  //
  // However, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that but C2
  // has to do the load and read as two independent instruction
  // generation steps. That's because with a single macro sequence the
  // generic C2 code can only add the oop map before the mov/adrp and
  // the trap handler expects an oop map to be associated with the
  // load. With the load scheduled as a prior step the oop map goes
  // where it is needed.
  //
  // So all we can do here is check that the marked instruction is a
  // load word to zr.
  return is_ldrw_to_zr(address(this));
}

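// Matches adrp: bit 31 is 1 and bits 28..24 are 0b10000; bits 30..29 hold
// the low two immediate bits and are masked out of the comparison.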
bool NativeInstruction::is_adrp_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 24) & 0b10011111) == 0b10010000;
}

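// Matches the ldr (literal) forms: the mask requires bits 28..27 == 0b11
// and bits 25..24 == 0b00 while ignoring bit 29 and bit 26 (the FP/SIMD
// flag), so integer and vector literal loads of any size are accepted.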
bool NativeInstruction::is_ldr_literal_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000;
}

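// Matches "ldr wzr, [Xn, #imm]": a 32-bit load with an unsigned immediate
// offset (bits 31..22 == 0b1011100101) whose destination register field is
// 31, i.e. the zero register. This is the pattern used for safepoint polls.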
bool NativeInstruction::is_ldrw_to_zr(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
          Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}

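// is_movz and is_movk test only bits 30..23 (the opcode field), leaving
// bit 31 (the sf bit) unconstrained, so both the 32-bit and the 64-bit
// forms of the instruction are recognized.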
bool NativeInstruction::is_movz() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
}

bool NativeInstruction::is_movk() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

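// 0xd4bbd5a1 encodes "dcps1 #0xdead". DCPS instructions are undefined
// outside Debug state, so executing this word raises SIGILL, which the
// platform signal handler is expected to recognize as the
// not-entrant/zombie marker.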
bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
         || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
         "Aarch64 cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t disp = dest - verified_entry;
    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");

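    // Encode an unconditional B: opcode 0b000101 in bits 31..26 and the
    // signed 26-bit word offset (byte displacement >> 2) in bits 25..0.
    // The patch is a single aligned 32-bit store, so other threads observe
    // either the old or the new instruction, never a mix.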
    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

void NativeGeneralJump::verify() { }

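// Emit an unconditional far jump: materialize the target in rscratch1
// (typically a movz/movk sequence) and branch through the register, so the
// jump can reach any address regardless of the +/-128 MB b/bl range.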
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.mov(rscratch1, entry);
  a.br(rscratch1);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}

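// A trampoline stub ends with a 64-bit data word at data_offset; the stub's
// code loads that word and branches through it, so reading or patching the
// word is all that is needed to query or retarget the stub.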
address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}