author | stefank |
Fri, 04 May 2018 11:41:35 +0200 | |
changeset 49982 | 9042ffe5b7fe |
parent 49592 | 77fb0be7d19f |
permissions | -rw-r--r-- |
42664 | 1 |
/* |
2 |
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. |
|
3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 |
* |
|
5 |
* This code is free software; you can redistribute it and/or modify it |
|
6 |
* under the terms of the GNU General Public License version 2 only, as |
|
7 |
* published by the Free Software Foundation. |
|
8 |
* |
|
9 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 |
* version 2 for more details (a copy is included in the LICENSE file that |
|
13 |
* accompanied this code). |
|
14 |
* |
|
15 |
* You should have received a copy of the GNU General Public License version |
|
16 |
* 2 along with this work; if not, write to the Free Software Foundation, |
|
17 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 |
* |
|
19 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 |
* or visit www.oracle.com if you need additional information or have any |
|
21 |
* questions. |
|
22 |
* |
|
23 |
*/ |
|
24 |
||
25 |
#include "precompiled.hpp" |
|
26 |
#include "assembler_arm.inline.hpp" |
|
27 |
#include "code/codeCache.hpp" |
|
28 |
#include "memory/resourceArea.hpp" |
|
29 |
#include "nativeInst_arm.hpp" |
|
49592
77fb0be7d19f
8199946: Move load/store and encode/decode out of oopDesc
stefank
parents:
47216
diff
changeset
|
30 |
#include "oops/compressedOops.inline.hpp" |
42664 | 31 |
#include "oops/klass.inline.hpp" |
49592
77fb0be7d19f
8199946: Move load/store and encode/decode out of oopDesc
stefank
parents:
47216
diff
changeset
|
32 |
#include "oops/oop.hpp" |
42664 | 33 |
#include "runtime/handles.hpp" |
34 |
#include "runtime/sharedRuntime.hpp" |
|
35 |
#include "runtime/stubRoutines.hpp" |
|
36 |
#include "utilities/ostream.hpp" |
|
37 |
#ifdef COMPILER1 |
|
38 |
#include "c1/c1_Runtime1.hpp" |
|
39 |
#endif |
|
40 |
||
41 |
void RawNativeInstruction::verify() { |
|
42 |
// make sure code pattern is actually an instruction address |
|
43 |
address addr = instruction_address(); |
|
44 |
if (addr == NULL || ((intptr_t)addr & (instruction_size - 1)) != 0) { |
|
45 |
fatal("not an instruction address"); |
|
46 |
} |
|
47 |
} |
|
48 |
||
49 |
// Patches the memory-operand offset of a patchable ldr/str instruction.
// Small offsets are re-encoded in place; larger offsets additionally use
// the nop slot following the instruction (see the rewrite comment below).
void NativeMovRegMem::set_offset(int x) {
  int scale = get_offset_scale();
  assert((x & right_n_bits(scale)) == 0, "offset should be aligned");
  // Offsets must fit in 24 bits: 12 low bits in the ldr/str plus 12 high
  // bits in the add emitted by the large-offset path below.
  guarantee((x >> 24) == 0, "encoding constraint");

  if (Assembler::is_unsigned_imm_in_range(x, 12, scale)) {
    // Offset fits the scaled unsigned 12-bit immediate field at bit 10.
    set_unsigned_imm(x, 12, get_offset_scale(), 10);
    return;
  }

  // If offset is too large to be placed into single ldr/str instruction, we replace
  //   ldr/str  Rt, [Rn, #offset]
  //   nop
  // with
  //   add LR, Rn, #offset_hi
  //   ldr/str Rt, [LR, #offset_lo]

  // Note: Rtemp cannot be used as a temporary register as it could be used
  // for value being stored (see LIR_Assembler::reg2mem).
  // Patchable NativeMovRegMem instructions are generated in LIR_Assembler::mem2reg and LIR_Assembler::reg2mem
  // which do not use LR, so it is free. Also, it does not conflict with LR usages in c1_LIRGenerator_arm.cpp.
  const int tmp = LR->encoding();
  const int rn = (encoding() >> 5) & 0x1f;  // base register (Rn) of the original ldr/str

  NativeInstruction* next = nativeInstruction_at(next_raw_instruction_address());
  assert(next->is_nop(), "must be");

  // Rewrite the nop slot as the ldr/str: keep opcode and Rt bits
  // (mask 0xffc0001f), insert the low 12 bits of the offset at bit 10,
  // and use LR (tmp) as the base register at bit 5.
  next->set_encoding((encoding() & 0xffc0001f) | Assembler::encode_unsigned_imm((x & 0xfff), 12, scale, 10) | tmp << 5);
  // Rewrite this instruction as the add producing LR = Rn + offset_hi
  // (0x91400000: add-immediate form with the high offset bits; see the
  // rewrite comment above).
  this->set_encoding(0x91400000 | Assembler::encode_unsigned_imm((x >> 12), 12, 0, 10) | rn << 5 | tmp);
}
|
79 |
||
80 |
// Returns the constant materialized by this instruction: either a
// movz;movk... sequence (COMPILER2 only) or an ldr-literal reading the
// constant pool.
intptr_t NativeMovConstReg::_data() const {
#ifdef COMPILER2
  if (is_movz()) {
    // narrow constant or ic call cached value
    RawNativeInstruction* ni = next_raw();
    assert(ni->is_movk(), "movz;movk expected");
    // imm16 is at bits 5..20 of the movz encoding.
    uint lo16 = (encoding() >> 5) & 0xffff;
    intptr_t hi = 0;
    int i = 0;
    // Up to three trailing movk instructions supply bits 16..63; each movk
    // carries its 16-bit chunk (bits 5..20) and a shift selector
    // (bits 21..22, in units of 16 bits).
    while (ni->is_movk() && i < 3) {
      uint hi16 = (ni->encoding() >> 5) & 0xffff;
      int shift = ((ni->encoding() >> 21) & 0x3) << 4;
      hi |= (intptr_t)hi16 << shift;
      ni = ni->next_raw();
      ++i;
    }
    return lo16 | hi;
  }
#endif
  // Not a movz sequence: the value lives in the literal pool.
  return (intptr_t)(nativeLdrLiteral_at(instruction_address())->literal_value());
}
|
101 |
||
102 |
// Patches the constant materialized by the instruction (sequence) at 'si'.
// At most one of oop_addr/metadata_addr is non-NULL; when both are NULL the
// raw value 'x' is patched directly.
static void raw_set_data(RawNativeInstruction* si, intptr_t x, oop* oop_addr, Metadata** metadata_addr) {
#ifdef COMPILER2
  if (si->is_movz()) {
    // narrow constant or ic call cached value
    uintptr_t nx = 0;
    int val_size = 32;  // 32 bits for compressed forms, 64 for a raw value
    if (oop_addr != NULL) {
      // Compressed oop: patch the 32-bit encoded form.
      narrowOop encoded_oop = CompressedOops::encode(*oop_addr);
      nx = encoded_oop;
    } else if (metadata_addr != NULL) {
      assert((*metadata_addr)->is_klass(), "expected Klass");
      narrowKlass encoded_k = Klass::encode_klass((Klass *)*metadata_addr);
      nx = encoded_k;
    } else {
      nx = x;
      val_size = 64;
    }
    RawNativeInstruction* ni = si->next_raw();
    uint lo16 = nx & 0xffff;
    int shift = 16;
    int imm16 = 0xffff << 5;  // mask of the imm16 field (bits 5..20)
    // Patch the low 16 bits into the movz, then each subsequent 16-bit
    // chunk into the following movk instructions, verifying each movk
    // carries the expected shift selector (bits 21..22, units of 16).
    si->set_encoding((si->encoding() & ~imm16) | (lo16 << 5));
    while (shift < val_size) {
      assert(ni->is_movk(), "movk expected");
      assert((((ni->encoding() >> 21) & 0x3) << 4) == shift, "wrong shift");
      uint hi16 = (nx >> shift) & 0xffff;
      ni->set_encoding((ni->encoding() & ~imm16) | (hi16 << 5));
      shift += 16;
      ni = ni->next_raw();
    }
    return;
  }
#endif

  assert(si->is_ldr_literal(), "should be");

  if (oop_addr == NULL && metadata_addr == NULL) {
    // A static ldr_literal without oop_relocation
    nativeLdrLiteral_at(si->instruction_address())->set_literal_value((address)x);
  } else {
    // Oop is loaded from oops section
    address addr = oop_addr != NULL ? (address)oop_addr : (address)metadata_addr;
    int offset = addr - si->instruction_address();

    assert((((intptr_t)addr) & 0x7) == 0, "target address should be aligned");
    assert((offset & 0x3) == 0, "offset should be aligned");

    // ldr-literal reaches only a signed 19-bit (word-scaled) range.
    guarantee(Assembler::is_offset_in_range(offset, 19), "offset is not in range");
    nativeLdrLiteral_at(si->instruction_address())->set_literal_address(si->instruction_address() + offset);
  }
}
|
153 |
||
154 |
// Sets the value materialized by this instruction; when the instruction has
// an oop or metadata relocation, the corresponding slot in the nmethod's
// oops/metadata section is updated as well.
void NativeMovConstReg::set_data(intptr_t x) {
  // Find and replace the oop corresponding to this instruction in oops section
  oop* oop_addr = NULL;
  Metadata** metadata_addr = NULL;
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Walk relocations covering this instruction (up to the next raw
      // instruction) looking for the oop/metadata slot it refers to, and
      // store the new value there first.
      RelocIterator iter(nm, instruction_address(), next_raw()->instruction_address());
      while (iter.next()) {
        if (iter.type() == relocInfo::oop_type) {
          oop_addr = iter.oop_reloc()->oop_addr();
          *oop_addr = cast_to_oop(x);
          break;
        } else if (iter.type() == relocInfo::metadata_type) {
          metadata_addr = iter.metadata_reloc()->metadata_addr();
          *metadata_addr = (Metadata*)x;
          break;
        }
      }
    }
  }
  // Patch the instruction itself (movz/movk sequence or ldr-literal).
  raw_set_data(adjust(this), x, oop_addr, metadata_addr);
}
|
178 |
||
179 |
// Intentionally a no-op on this platform: no additional alignment check is
// performed on the verified entry point here.
void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}
|
181 |
||
182 |
// Overwrites the verified entry point with the zombie illegal instruction,
// so that threads entering a zombied method trap and are redirected to the
// handle_wrong_method stub.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "should be");

  NativeInstruction* ni = nativeInstruction_at(verified_entry);
  // Only a nop or an already-patched slot may be overwritten here.
  assert(ni->is_nop() || ni->encoding() == zombie_illegal_instruction,
         "required for MT-safe patching");
  ni->set_encoding(zombie_illegal_instruction);
}
|
189 |
||
190 |
// Replaces the branch at instr_addr with the single instruction assembled
// into code_buffer. Both sites are checked so that only a b instruction is
// overwritten, and only with contents that started out as a nop slot.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  NativeInstruction* target = nativeInstruction_at(instr_addr);
  assert(target->is_b(), "MT-safe patching of arbitrary instructions is not allowed");
  assert(nativeInstruction_at(code_buffer)->is_nop(), "MT-safe patching of arbitrary instructions is not allowed");
  const int new_encoding = *(int*)code_buffer;
  target->set_encoding(new_encoding);
}
|
195 |
||
196 |
// Writes an unconditional B instruction at code_pos that jumps to entry.
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  // Insert at code_pos unconditional B instruction jumping to entry
  const intx offset = entry - code_pos;
  assert(Assembler::is_offset_in_range(offset, 26), "offset is out of range");

  NativeInstruction* ni = nativeInstruction_at(code_pos);
  assert(ni->is_b() || ni->is_nop(), "MT-safe patching of arbitrary instructions is not allowed");

  // B opcode bits go in the top of the word, the 26-bit offset below them.
  const int b_opcode = 0x5 << 26;
  ni->set_encoding(b_opcode | Assembler::encode_offset(offset, 26, 0));
}
|
206 |
||
207 |
// Searches backwards from return_address for the call instruction whose
// return address is exactly return_address; returns NULL if none is found.
static address call_for(address return_address) {
  CodeBlob* cb = CodeCache::find_blob(return_address);
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm == NULL) {
    ShouldNotReachHere();
    return NULL;
  }

  // Look back 8 instructions (for LIR_Assembler::ic_call and MacroAssembler::patchable_call)
  address scan_from = return_address - 8 * NativeInstruction::instruction_size;
  if (scan_from < nm->code_begin()) {
    scan_from = nm->code_begin();
  }
  RelocIterator iter(nm, scan_from, return_address);
  while (iter.next()) {
    Relocation* r = iter.reloc();
    if (!r->is_call()) {
      continue;
    }
    address pc = r->addr();
    // Match only a real call instruction whose return address is ours.
    if (nativeInstruction_at(pc)->is_call() &&
        nativeCall_at(pc)->return_address() == return_address) {
      return pc;
    }
  }

  return NULL;
}
|
235 |
||
236 |
// True when a native call instruction immediately precedes return_address.
bool NativeCall::is_call_before(address return_address) {
  return call_for(return_address) != NULL;
}
|
239 |
||
240 |
// Returns the NativeCall preceding return_address; callers must first have
// established its existence (see NativeCall::is_call_before).
NativeCall* nativeCall_before(address return_address) {
  address call_addr = call_for(return_address);
  assert(call_addr != NULL, "must be");
  return nativeCall_at(call_addr);
}