/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

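// Checks that the receiver's klass matches the klass expected by the inline
// cache; on a mismatch, tail-calls the shared IC miss stub. The verified
// entry point that follows is padded to CodeEntryAlignment.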
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  Label verified;
  load_klass(Rtemp, receiver);
  cmp(Rtemp, iCache);
  b(verified, eq); // jump over alignment no-ops
  jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
  align(CodeEntryAlignment);
  bind(verified);
}

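// Builds the standard C1 frame: bangs the stack with bang_size_in_bytes to
// provoke any pending stack overflow early, saves FP and LR, then reserves
// frame_size_in_bytes of stack space.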
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");

  arm_stack_overflow_check(bang_size_in_bytes, Rtemp);

  // FP can no longer be used to remember SP. It may be modified
  // if this method contains a methodHandle call site.
  raw_push(FP, LR);
  sub_slow(SP, SP, frame_size_in_bytes);
}

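// Tears down the frame built by build_frame: releases the reserved stack
// space and restores FP and LR.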
void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  add_slow(SP, SP, frame_size_in_bytes);
  raw_pop(FP, LR);
}

void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint) {
    breakpoint();
  }
}

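// Fast-path allocation: uses the TLAB when enabled, otherwise allocates
// directly from eden; branches to `slow_case` if the fast path fails.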
// Puts the address of the allocated object into register `obj` and the end of
// the allocated space into register `obj_end`.
void C1_MacroAssembler::try_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                                     RegisterOrConstant size_expression, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, obj_end, tmp1, size_expression, slow_case);
  } else {
    eden_allocate(obj, obj_end, tmp1, tmp2, size_expression, slow_case);
  }
}


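// Writes the object header: the mark word (the klass prototype header for
// biased locking on plain objects, otherwise the default prototype), the
// klass pointer, and, for arrays, the length field.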
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp) {
  assert_different_registers(obj, klass, len, tmp);

  if (UseBiasedLocking && !len->is_valid()) {
    ldr(tmp, Address(klass, Klass::prototype_header_offset()));
  } else {
    mov(tmp, (intptr_t)markWord::prototype().value());
  }

  str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

  if (len->is_valid()) {
    str_32(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  }
}


// Cleans object body [base..obj_end]. Clobbers `base` and `tmp` registers.
void C1_MacroAssembler::initialize_body(Register base, Register obj_end, Register tmp) {
  zero_memory(base, obj_end, tmp);
}


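// Initializes a newly allocated object: writes the header, then zeroes the
// body unless the TLAB is known to be pre-zeroed (ZeroTLAB). Bodies of small
// objects with a statically known size are cleared with unrolled stores;
// everything else goes through initialize_body.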
void C1_MacroAssembler::initialize_object(Register obj, Register obj_end, Register klass,
                                          Register len, Register tmp1, Register tmp2,
                                          RegisterOrConstant header_size, int obj_size_in_bytes,
                                          bool is_tlab_allocated)
{
  assert_different_registers(obj, obj_end, klass, len, tmp1, tmp2);
  initialize_header(obj, klass, len, tmp1);

  const Register ptr = tmp2;

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    if (obj_size_in_bytes >= 0 && obj_size_in_bytes <= 8 * BytesPerWord) {
      mov(tmp1, 0);
      const int base = instanceOopDesc::header_size() * HeapWordSize;
      for (int i = base; i < obj_size_in_bytes; i += wordSize) {
        str(tmp1, Address(obj, i));
      }
    } else {
      assert(header_size.is_constant() || header_size.as_register() == ptr, "code assumption");
      add(ptr, obj, header_size);
      initialize_body(ptr, obj_end, tmp1);
    }
  }

  // StoreStore barrier required after complete initialization
  // (headers + content zeroing), before the object may escape.
  membar(MacroAssembler::StoreStore, tmp1);
}

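// Allocates and initializes a fixed-size instance; `object_size` is given in
// words. The size is passed to try_allocate as an immediate when it fits an
// arithmetic immediate, otherwise via Rtemp.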
void C1_MacroAssembler::allocate_object(Register obj, Register tmp1, Register tmp2, Register tmp3,
                                        int header_size, int object_size,
                                        Register klass, Label& slow_case) {
  assert_different_registers(obj, tmp1, tmp2, tmp3, klass, Rtemp);
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
  const int object_size_in_bytes = object_size * BytesPerWord;

  const Register obj_end = tmp1;
  const Register len = noreg;

  if (Assembler::is_arith_imm_in_range(object_size_in_bytes)) {
    try_allocate(obj, obj_end, tmp2, tmp3, object_size_in_bytes, slow_case);
  } else {
    // Rtemp should be free at c1 LIR level
    mov_slow(Rtemp, object_size_in_bytes);
    try_allocate(obj, obj_end, tmp2, tmp3, Rtemp, slow_case);
  }
  initialize_object(obj, obj_end, klass, len, tmp2, tmp3, instanceOopDesc::header_size() * HeapWordSize, object_size_in_bytes, /* is_tlab_allocated */ UseTLAB);
}

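// Allocates and initializes an array. The allocation size is computed as
// header_size_in_bytes + (len << scale_shift), rounded up to the minimum
// object alignment when the header or element size requires it; lengths of
// max_array_allocation_length or more are sent to the slow case.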
void C1_MacroAssembler::allocate_array(Register obj, Register len,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       int header_size, int element_size,
                                       Register klass, Label& slow_case) {
  assert_different_registers(obj, len, tmp1, tmp2, tmp3, klass, Rtemp);
  const int header_size_in_bytes = header_size * BytesPerWord;
  const int scale_shift = exact_log2(element_size);
  const Register obj_size = Rtemp; // Rtemp should be free at c1 LIR level

  cmp_32(len, max_array_allocation_length);
  b(slow_case, hs);

  bool align_header = ((header_size_in_bytes | element_size) & MinObjAlignmentInBytesMask) != 0;
  assert(align_header || ((header_size_in_bytes & MinObjAlignmentInBytesMask) == 0), "must be");
  assert(align_header || ((element_size & MinObjAlignmentInBytesMask) == 0), "must be");

  mov(obj_size, header_size_in_bytes + (align_header ? (MinObjAlignmentInBytes - 1) : 0));
  add_ptr_scaled_int32(obj_size, obj_size, len, scale_shift);

  if (align_header) {
    align_reg(obj_size, obj_size, MinObjAlignmentInBytes);
  }

  try_allocate(obj, tmp1, tmp2, tmp3, obj_size, slow_case);
  initialize_object(obj, tmp1, klass, len, tmp2, tmp3, header_size_in_bytes, -1, /* is_tlab_allocated */ UseTLAB);
}

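// Emits the C1 monitorenter fast path using stack locking: if the object is
// unlocked, its mark word is saved as the displaced header and a pointer to
// the BasicLock is CASed into the object header; a recursive lock by the
// current thread stores a 0 displaced header instead. Returns the offset of
// the instruction carrying the implicit null check of `obj`.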
int C1_MacroAssembler::lock_object(Register hdr, Register obj,
                                   Register disp_hdr, Register tmp1,
                                   Label& slow_case) {
  Label done, fast_lock, fast_lock_done;
  int null_check_offset = 0;

  const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
  assert_different_registers(hdr, obj, disp_hdr, tmp1, tmp2);

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  if (UseBiasedLocking) {
    // save the object being locked into the lock record
    str(obj, Address(disp_hdr, obj_offset));
    null_check_offset = biased_locking_enter(obj, hdr/*scratched*/, tmp1, false, tmp2, done, slow_case);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  if (!UseBiasedLocking) {
    null_check_offset = offset();
  }

  // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
  // That would be acceptable as either the CAS or the slow case path is taken in that case.

  // Must be the first instruction here, because the implicit null check relies on it
  ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));

  str(obj, Address(disp_hdr, obj_offset));
  tst(hdr, markWord::unlocked_value);
  b(fast_lock, ne);

  // Check for recursive locking
  // See comments in InterpreterMacroAssembler::lock_object for
  // explanations on the fast recursive locking check.
  // -1- test low 2 bits
  movs(tmp2, AsmOperand(hdr, lsl, 30));
  // -2- test (hdr - SP) if the low two bits are 0
  sub(tmp2, hdr, SP, eq);
  movs(tmp2, AsmOperand(tmp2, lsr, exact_log2(os::vm_page_size())), eq);
  // If 'eq' then OK for recursive fast locking: store 0 into a lock record.
  str(tmp2, Address(disp_hdr, mark_offset), eq);
  b(fast_lock_done, eq);
  // else need slow case
  b(slow_case);

  bind(fast_lock);
  // Save previous object header in BasicLock structure and update the header
  str(hdr, Address(disp_hdr, mark_offset));

  cas_for_lock_acquire(hdr, disp_hdr, obj, tmp2, slow_case);

  bind(fast_lock_done);

#ifndef PRODUCT
  if (PrintBiasedLockingStatistics) {
    cond_atomic_inc32(al, BiasedLocking::fast_path_entry_count_addr());
  }
#endif // !PRODUCT

  bind(done);

  return null_check_offset;
}

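// Emits the matching monitorexit fast path: a 0 displaced header marks a
// recursive unlock (nothing to do); otherwise the displaced header is CASed
// back into the object header.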
void C1_MacroAssembler::unlock_object(Register hdr, Register obj,
                                      Register disp_hdr, Register tmp,
                                      Label& slow_case) {
  // Note: this method is not using its 'tmp' argument

  assert_different_registers(hdr, obj, disp_hdr, Rtemp);
  Register tmp2 = Rtemp;

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  Label done;
  if (UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, obj_offset));
    biased_locking_exit(obj, hdr, done);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  // Load displaced header and object from the lock
  ldr(hdr, Address(disp_hdr, mark_offset));
  // If hdr is NULL, we've got recursive locking and there's nothing more to do
  cbz(hdr, done);

  if (!UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, obj_offset));
  }

  // Restore the object header
  cas_for_lock_release(disp_hdr, hdr, obj, tmp2, slow_case);

  bind(done);
}


#ifndef PRODUCT

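// Debug-only oop verification helpers, compiled out in PRODUCT builds.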
void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(SP, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  Label not_null;
  cbnz(r, not_null);
  stop("non-null oop required");
  bind(not_null);
  if (!VerifyOops) return;
  verify_oop(r);
}

#endif // !PRODUCT