/*
 * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_MacroAssembler_x86.cpp.incl"

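// Note: the helpers below assume a 32-bit VM word size (several of them
// assert BytesPerWord == 4) and use the 32-bit, l-suffixed instruction forms.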
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
  const int aligned_mask = 3;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  assert(BytesPerWord == 4, "adjust aligned_mask and code");
  Label done;
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  movl(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);

  if (UseBiasedLocking) {
    assert(scratch != noreg, "should have scratch register at this point");
    null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
  } else {
    null_check_offset = offset();
  }

  // Load object header
  movl(hdr, Address(obj, hdr_offset));
  // and mark it as unlocked
  orl(hdr, markOopDesc::unlocked_value);
  // save unlocked object header into the displaced header location on the stack
  movl(Address(disp_hdr, 0), hdr);
  // test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header - if it is not the same, get the
  // object header instead
  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchg(disp_hdr, Address(obj, hdr_offset));
  // if the object header was the same, we're done
  if (PrintBiasedLockingStatistics) {
    cond_inc32(Assembler::equal,
               ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
  }
  jcc(Assembler::equal, done);
  // if the object header was not the same, it is now in the hdr register
  // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & aligned_mask) == 0
  // 2) rsp <= hdr
  // 3) hdr <= rsp + page_size
  //
  // these 3 tests can be done by evaluating the following expression:
  //
  // (hdr - rsp) & (aligned_mask - page_size)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
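  //
  // worked example (assuming, for illustration, a 4096-byte page):
  //   aligned_mask - page_size = 3 - 4096 = 0xfffff003, so the andl below
  //   is zero exactly when hdr - rsp is a multiple of 4 in the range [0, 4096)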
  subl(hdr, rsp);
  andl(hdr, aligned_mask - os::vm_page_size());
  // for recursive locking, the result is zero => save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking)
  movl(Address(disp_hdr, 0), hdr);
  // otherwise we don't care about the result and handle locking via runtime call
  jcc(Assembler::notZero, slow_case);
  // done
  bind(done);
  return null_check_offset;
}


void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = 3;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  assert(BytesPerWord == 4, "adjust aligned_mask and code");
  Label done;

  if (UseBiasedLocking) {
    // load object
    movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    biased_locking_exit(obj, hdr, done);
  }

  // load displaced header
  movl(hdr, Address(disp_hdr, 0));
  // if the loaded hdr is NULL we had recursive locking
  testl(hdr, hdr);
  // if we had recursive locking, we are done
  jcc(Assembler::zero, done);
  if (!UseBiasedLocking) {
    // load object
    movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }
  verify_oop(obj);
  // test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object - if the object header is not pointing to
  // the displaced header, get the object header instead
  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchg(hdr, Address(obj, hdr_offset));
  // if the object header was not pointing to the displaced header,
  // we do unlocking via runtime call
  jcc(Assembler::notEqual, slow_case);
  // done
  bind(done);
}


// Defines obj, preserves var_size_in_bytes
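// (tlab_allocate bump-allocates from the current thread's TLAB; eden_allocate
// allocates from the shared eden space; both branch to slow_case if the fast
// path cannot satisfy the request)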
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
  }
}


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
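  // with biased locking enabled, a plain (non-array) object gets its initial
  // mark word from the klass' prototype header, which may already carry a bias
  // pattern; arrays fall through to the unbiased prototype mark below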
  if (UseBiasedLocking && !len->is_valid()) {
    assert_different_registers(obj, klass, len, t1, t2);
    movl(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
    movl(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
  } else {
    movl(Address(obj, oopDesc::mark_offset_in_bytes()), (int)markOopDesc::prototype());
  }

  movl(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
  if (len->is_valid()) {
    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
  }
}


// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  Label done;
  assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
  assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
  Register index = len_in_bytes;
  subl(index, hdr_size_in_bytes);
  jcc(Assembler::zero, done);
  // initialize topmost word, divide index by 2, check if odd and test if zero
  // note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
  { Label L;
    testl(index, BytesPerWord - 1);
    jcc(Assembler::zero, L);
    stop("index is not a multiple of BytesPerWord");
    bind(L);
  }
#endif
  xorl(t1, t1);      // use _zero reg to clear memory (shorter code)
  if (UseIncDec) {
    shrl(index, 3);  // divide by 8 and set carry flag if bit 2 was set
  } else {
    shrl(index, 2);  // use 2 instructions to avoid partial flag stall
    shrl(index, 1);
  }
  // index could have been not a multiple of 8 (i.e., bit 2 was set)
  { Label even;
    // note: if index was a multiple of 8, then it cannot
    // be 0 now; otherwise it must have been 0 before
    // => if it is even, we don't need to check for 0 again
    jcc(Assembler::carryClear, even);
    // clear topmost word (no jump needed if conditional assignment would work here)
    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
    // index could be 0 now, need to check again
    jcc(Assembler::zero, done);
    bind(even);
  }
  // initialize remaining object fields: an even number of words remains now
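  // (each iteration below clears two words: with times_8 scaling, index i
  // covers the two BytesPerWord-sized words at offsets hdr_size_in_bytes + 8*i - 4
  // and hdr_size_in_bytes + 8*i - 8, walking down until index reaches zero)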
  { Label loop;
    bind(loop);
    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);
    decrement(index);
    jcc(Assembler::notZero, loop);
  }

  // done
  bind(done);
}


void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert(obj != t1 && obj != t2 && t1 != t2, "registers must be different"); // XXX really?
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2);
}

void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = oopDesc::header_size_in_bytes();

  initialize_header(obj, klass, noreg, t1, t2);

  // clear rest of allocated space
  const Register t1_zero = t1;
  const Register index = t2;
  const int threshold = 6 * BytesPerWord; // approximate break even point for code size (see comments below)
  if (var_size_in_bytes != noreg) {
    movl(index, var_size_in_bytes);
    initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
  } else if (con_size_in_bytes <= threshold) {
    // use explicit null stores
    // code size = 2 + 3*n bytes (n = number of fields to clear)
    xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
    for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
      movl(Address(obj, i), t1_zero);
  } else if (con_size_in_bytes > hdr_size_in_bytes) {
    // use loop to null out the fields
    // code size = 16 bytes for even n (n = number of fields to clear)
    // initialize last object field first if odd number of fields
    xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
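    // (con_size_in_bytes - hdr_size_in_bytes) >> 3 counts the 8-byte, i.e.
    // two-word, chunks to clear; a leftover odd word is handled just below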
    movl(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
    // initialize last object field if constant size is odd
    if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
      movl(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
    // initialize remaining object fields: an even number of words remains
    { Label loop;
      bind(loop);
      movl(Address(obj, index, Address::times_8,
                   hdr_size_in_bytes - (1*BytesPerWord)), t1_zero);
      movl(Address(obj, index, Address::times_8,
                   hdr_size_in_bytes - (2*BytesPerWord)), t1_zero);
      decrement(index);
      jcc(Assembler::notZero, loop);
    }
  }

  if (DTraceAllocProbes) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}

void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(BytesPerWord == 4, "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  cmpl(len, max_array_allocation_length);
  jcc(Assembler::above, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
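  // i.e. arr_size = align_up(header_size * BytesPerWord + (len << f),
  // MinObjAlignmentInBytes): add the mask up front, fold in the scaled
  // length with lea, then clear the low alignment bits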
  movl(arr_size, header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  leal(arr_size, Address(arr_size, len, f));
  andl(arr_size, ~MinObjAlignmentInBytesMask);

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

  // clear rest of allocated space
  const Register len_zero = len;
  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);

  if (DTraceAllocProbes) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}


void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit NULL check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  int start_offset = offset();
  cmpl(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  jump_cc(Assembler::notEqual,
          RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
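  // the check above is expected to encode to exactly 9 bytes: a 3-byte cmp
  // (register against [receiver + disp8]) plus a 6-byte near jcc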
  assert(offset() - start_offset == 9, "check alignment in emit_method_entry");
}


void C1_MacroAssembler::method_exit(bool restore_frame) {
  if (restore_frame) {
    leave();
  }
  ret(0);
}


void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before doing an enter(). This matches the
  // ordering of C2's stack overflow check / rsp decrement and allows
  // the SharedRuntime stack overflow handling to be consistent
  // between the two compilers.
  generate_stack_overflow_check(frame_size_in_bytes);

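  // enter() pushes the caller's rbp and establishes this frame's rbp; the
  // explicit rsp decrement below then reserves the rest of the fixed frame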
  enter();
#ifdef TIERED
  // c2 leaves fpu stack dirty. Clean it on entry
  if (UseSSE < 2) {
    empty_FPU_stack();
  }
#endif // TIERED
  decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0
}


void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
  if (C1Breakpoint) int3();
  inline_cache_check(receiver, ic_klass);
}


void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint) int3();
  // build frame
  verify_FPU(0, "method_entry");
}


#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(rsp, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label not_null;
  testl(r, r);
  jcc(Assembler::notZero, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}

void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
#ifdef ASSERT
  if (inv_rax) movl(rax, 0xDEAD);
  if (inv_rbx) movl(rbx, 0xDEAD);
  if (inv_rcx) movl(rcx, 0xDEAD);
  if (inv_rdx) movl(rdx, 0xDEAD);
  if (inv_rsi) movl(rsi, 0xDEAD);
  if (inv_rdi) movl(rdi, 0xDEAD);
#endif
}

#endif // ifndef PRODUCT