/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "assembler_arm.inline.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_arm.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void RawNativeInstruction::verify() {
  // make sure code pattern is actually an instruction address
  address addr = instruction_address();
  if (addr == NULL || ((intptr_t)addr & (instruction_size - 1)) != 0) {
    fatal("not an instruction address");
  }
}

void NativeMovRegMem::set_offset(int x) {
  int scale = get_offset_scale();
  assert((x & right_n_bits(scale)) == 0, "offset should be aligned");
  guarantee((x >> 24) == 0, "encoding constraint");

  if (Assembler::is_unsigned_imm_in_range(x, 12, scale)) {
    set_unsigned_imm(x, 12, get_offset_scale(), 10);
    return;
  }

  // If the offset is too large to be placed into a single ldr/str instruction, we replace
  //   ldr/str Rt, [Rn, #offset]
  //   nop
  // with
  //   add LR, Rn, #offset_hi
  //   ldr/str Rt, [LR, #offset_lo]

  // Note: Rtemp cannot be used as a temporary register as it could be used
  // for the value being stored (see LIR_Assembler::reg2mem).
  // Patchable NativeMovRegMem instructions are generated in LIR_Assembler::mem2reg and
  // LIR_Assembler::reg2mem, which do not use LR, so it is free here. This also does not
  // conflict with LR usages in c1_LIRGenerator_arm.cpp.
  const int tmp = LR->encoding();
  const int rn = (encoding() >> 5) & 0x1f;

  NativeInstruction* next = nativeInstruction_at(next_raw_instruction_address());
  assert(next->is_nop(), "must be");

  next->set_encoding((encoding() & 0xffc0001f) | Assembler::encode_unsigned_imm((x & 0xfff), 12, scale, 10) | tmp << 5);
  this->set_encoding(0x91400000 | Assembler::encode_unsigned_imm((x >> 12), 12, 0, 10) | rn << 5 | tmp);
}

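// Worked example (illustrative, assuming scale == 0): for x == 0x123456 the
// 12-bit unsigned-immediate check above fails, so the pair is patched to
//   add LR, Rn, #0x123, LSL #12    (the 0x91400000 form; imm12 = x >> 12)
//   ldr/str Rt, [LR, #0x456]       (imm12 = x & 0xfff)
// for an effective address of Rn + 0x123000 + 0x456 == Rn + 0x123456; the
// guarantee((x >> 24) == 0) above ensures x >> 12 always fits in 12 bits.
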
intptr_t NativeMovConstReg::data() const {
#ifdef COMPILER2
  if (is_movz()) {
    // narrow constant or ic call cached value
    RawNativeInstruction* ni = next_raw();
    assert(ni->is_movk(), "movz;movk expected");
    uint lo16 = (encoding() >> 5) & 0xffff;
    intptr_t hi = 0;
    int i = 0;
    while (ni->is_movk() && i < 3) {
      uint hi16 = (ni->encoding() >> 5) & 0xffff;
      int shift = ((ni->encoding() >> 21) & 0x3) << 4;
      hi |= (intptr_t)hi16 << shift;
      ni = ni->next_raw();
      ++i;
    }
    return lo16 | hi;
  }
#endif
  return (intptr_t)(nativeLdrLiteral_at(instruction_address())->literal_value());
}

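// Illustrative decode (a sketch, not taken from the surrounding code): a value
// materialized as
//   movz x0, #0x7890              (imm16 in bits 5..20)
//   movk x0, #0x3456, lsl #16     (hw field, bits 21..22, == 1 -> shift 16)
//   movk x0, #0x0012, lsl #32     (hw field == 2 -> shift 32)
// is reassembled by the loop above as
//   0x7890 | (0x3456 << 16) | (0x12 << 32) == 0x0000001234567890.
// The hw field is two bits, so shift = hw << 4 covers 0/16/32/48, and at most
// three movk instructions follow the movz for a full 64-bit value.
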
static void raw_set_data(RawNativeInstruction* si, intptr_t x, oop* oop_addr, Metadata** metadata_addr) {
#ifdef COMPILER2
  if (si->is_movz()) {
    // narrow constant or ic call cached value
    uintptr_t nx = 0;
    int val_size = 32;
    if (oop_addr != NULL) {
      narrowOop encoded_oop = oopDesc::encode_heap_oop(*oop_addr);
      nx = encoded_oop;
    } else if (metadata_addr != NULL) {
      assert((*metadata_addr)->is_klass(), "expected Klass");
      narrowKlass encoded_k = Klass::encode_klass((Klass *)*metadata_addr);
      nx = encoded_k;
    } else {
      nx = x;
      val_size = 64;
    }
    RawNativeInstruction* ni = si->next_raw();
    uint lo16 = nx & 0xffff;
    int shift = 16;
    int imm16 = 0xffff << 5;
    si->set_encoding((si->encoding() & ~imm16) | (lo16 << 5));
    while (shift < val_size) {
      assert(ni->is_movk(), "movk expected");
      assert((((ni->encoding() >> 21) & 0x3) << 4) == shift, "wrong shift");
      uint hi16 = (nx >> shift) & 0xffff;
      ni->set_encoding((ni->encoding() & ~imm16) | (hi16 << 5));
      shift += 16;
      ni = ni->next_raw();
    }
    return;
  }
#endif

  assert(si->is_ldr_literal(), "should be");

  if (oop_addr == NULL && metadata_addr == NULL) {
    // A static ldr_literal without oop_relocation
    nativeLdrLiteral_at(si->instruction_address())->set_literal_value((address)x);
  } else {
    // Oop is loaded from oops section
    address addr = oop_addr != NULL ? (address)oop_addr : (address)metadata_addr;
    int offset = addr - si->instruction_address();

    assert((((intptr_t)addr) & 0x7) == 0, "target address should be aligned");
    assert((offset & 0x3) == 0, "offset should be aligned");

    guarantee(Assembler::is_offset_in_range(offset, 19), "offset is not in range");
    nativeLdrLiteral_at(si->instruction_address())->set_literal_address(si->instruction_address() + offset);
  }
}

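// Field-patching sketch (illustrative): in both movz and movk the 16-bit
// immediate occupies bits 5..20, hence the mask imm16 == 0xffff << 5 above.
// Rewriting only that field keeps the opcode, hw field and Rd intact:
//   insn = (insn & ~(0xffff << 5)) | ((new_imm16 & 0xffff) << 5);
// e.g. patching movz x0, #0x7890 (0xd28f1200) to #0xbeef yields 0xd297dde0.
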
void NativeMovConstReg::set_data(intptr_t x) {
  // Find and replace the oop corresponding to this instruction in oops section
  oop* oop_addr = NULL;
  Metadata** metadata_addr = NULL;
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      RelocIterator iter(nm, instruction_address(), next_raw()->instruction_address());
      while (iter.next()) {
        if (iter.type() == relocInfo::oop_type) {
          oop_addr = iter.oop_reloc()->oop_addr();
          *oop_addr = cast_to_oop(x);
          break;
        } else if (iter.type() == relocInfo::metadata_type) {
          metadata_addr = iter.metadata_reloc()->metadata_addr();
          *metadata_addr = (Metadata*)x;
          break;
        }
      }
    }
  }
  raw_set_data(adjust(this), x, oop_addr, metadata_addr);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "should be");

  NativeInstruction* instr = nativeInstruction_at(verified_entry);
  assert(instr->is_nop() || instr->encoding() == zombie_illegal_instruction, "required for MT-safe patching");
  instr->set_encoding(zombie_illegal_instruction);
}

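// The assert above documents the MT-safety argument: the patch is a single
// aligned 32-bit store, so a concurrently executing thread observes either
// the old nop or the complete illegal instruction, never a torn word.
// Executing zombie_illegal_instruction is then expected to trap, with the
// signal handler routing the caller to the handle_wrong_method stub checked
// against dest above.
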
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(nativeInstruction_at(instr_addr)->is_b(), "MT-safe patching of arbitrary instructions is not allowed");
  assert(nativeInstruction_at(code_buffer)->is_nop(), "MT-safe patching of arbitrary instructions is not allowed");
  nativeInstruction_at(instr_addr)->set_encoding(*(int*)code_buffer);
}

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  // Insert an unconditional B instruction at code_pos jumping to entry
  intx offset = entry - code_pos;
  assert(Assembler::is_offset_in_range(offset, 26), "offset is out of range");

  NativeInstruction* instr = nativeInstruction_at(code_pos);
  assert(instr->is_b() || instr->is_nop(), "MT-safe patching of arbitrary instructions is not allowed");

  instr->set_encoding(0x5 << 26 | Assembler::encode_offset(offset, 26, 0));
}

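// Encoding sketch (illustrative): an unconditional B places opcode 0b000101
// in bits 26..31 (the 0x5 << 26 above) with a signed 26-bit word offset in
// bits 0..25; encode_offset(offset, 26, 0) is assumed to store (offset >> 2)
// in those low bits. For example, a branch over one intervening instruction
// (offset == 8 bytes, imm26 == 2) encodes as 0x14000002.
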
static address call_for(address return_address) {
  CodeBlob* cb = CodeCache::find_blob(return_address);
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm == NULL) {
    ShouldNotReachHere();
    return NULL;
  }

  // Look back 8 instructions (for LIR_Assembler::ic_call and MacroAssembler::patchable_call)
  address begin = return_address - 8*NativeInstruction::instruction_size;
  if (begin < nm->code_begin()) {
    begin = nm->code_begin();
  }
  RelocIterator iter(nm, begin, return_address);
  while (iter.next()) {
    Relocation* reloc = iter.reloc();
    if (reloc->is_call()) {
      address call = reloc->addr();
      if (nativeInstruction_at(call)->is_call()) {
        if (nativeCall_at(call)->return_address() == return_address) {
          return call;
        }
      }
    }
  }

  return NULL;
}

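// Note on the window above: 8 instructions is assumed to cover the longest
// patchable call sequence emitted (a movz/movk IC-cache load plus the call
// itself, per LIR_Assembler::ic_call and MacroAssembler::patchable_call);
// matching both the relocation type and the exact return address guards
// against picking up an unrelated earlier call in the same nmethod.
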
bool NativeCall::is_call_before(address return_address) {
  return (call_for(return_address) != NULL);
}

NativeCall* nativeCall_before(address return_address) {
  assert(NativeCall::is_call_before(return_address), "must be");
  return nativeCall_at(call_for(return_address));
}