/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/stubRoutines.hpp"

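// Emits the fast path for monitorenter: saves the object into the
// BasicObjectLock, tries biased locking when enabled, and otherwise attempts
// a thin lock by CASing the displaced mark word into the object header.
// slow_case is taken when the object is neither unlocked nor recursively
// locked by the current thread. Returns the code offset at which an implicit
// null check of obj can occur.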
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
  const int aligned_mask = BytesPerWord -1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);

  if (UseBiasedLocking) {
    assert(scratch != noreg, "should have scratch register at this point");
    null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
  } else {
    null_check_offset = offset();
  }

  // Load object header
  movptr(hdr, Address(obj, hdr_offset));
  // and mark it as unlocked
  orptr(hdr, markOopDesc::unlocked_value);
  // save unlocked object header into the displaced header location on the stack
  movptr(Address(disp_hdr, 0), hdr);
  // test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header - if it is not the same, get the
  // object header instead
  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
  // if the object header was the same, we're done
  if (PrintBiasedLockingStatistics) {
    cond_inc32(Assembler::equal,
               ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
  }
  jcc(Assembler::equal, done);
  // if the object header was not the same, it is now in the hdr register
  // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & aligned_mask) == 0
  // 2) rsp <= hdr
  // 3) hdr <= rsp + page_size
  //
  // these 3 tests can be done by evaluating the following expression:
  //
  // (hdr - rsp) & (aligned_mask - page_size)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
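  //
  // e.g. with a 4096 byte page and BytesPerWord == 8 the mask is
  // 7 - 4096 = 0x...f007, so the result is zero exactly when the low three
  // bits of (hdr - rsp) are clear and 0 <= hdr - rsp < 4096, i.e. when hdr
  // is a word-aligned address within one page above rsp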
  subptr(hdr, rsp);
  andptr(hdr, aligned_mask - os::vm_page_size());
  // for recursive locking, the result is zero => save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking)
  movptr(Address(disp_hdr, 0), hdr);
  // otherwise we don't care about the result and handle locking via runtime call
  jcc(Assembler::notZero, slow_case);
  // done
  bind(done);
  return null_check_offset;
}

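// Emits the fast path for monitorexit: a NULL displaced header means the
// lock was held recursively and nothing needs to be done; otherwise the
// displaced mark word is CASed back into the object header. slow_case is
// taken when the header no longer points at the displaced header (e.g. the
// lock has been inflated).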
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord -1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;

  if (UseBiasedLocking) {
    // load object
    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    biased_locking_exit(obj, hdr, done);
  }

  // load displaced header
  movptr(hdr, Address(disp_hdr, 0));
  // if the loaded hdr is NULL we had recursive locking
  testptr(hdr, hdr);
  // if we had recursive locking, we are done
  jcc(Assembler::zero, done);
  if (!UseBiasedLocking) {
    // load object
    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }
  verify_oop(obj);
  // test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object - if the object header is not pointing to
  // the displaced header, get the object header instead
  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
  cmpxchgptr(hdr, Address(obj, hdr_offset));
  // if the object header was not pointing to the displaced header,
  // we do unlocking via runtime call
  jcc(Assembler::notEqual, slow_case);
  // done
  bind(done);
}

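// Fast-path allocation: uses the current thread's TLAB when TLABs are
// enabled, otherwise allocates directly from eden and bumps the thread's
// cumulative allocated-bytes counter (TLAB allocations are accounted for
// separately, when the TLAB is retired).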
// Defines obj, preserves var_size_in_bytes
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
  } else {
    eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
    incr_allocated_bytes(noreg, var_size_in_bytes, con_size_in_bytes, t1);
  }
}

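// Writes the object header: the mark word (the klass' prototype header for
// non-arrays when biased locking is enabled, otherwise the default prototype
// mark), the klass (compressed on 64-bit when UseCompressedOops is set), and
// the array length when len is a valid register; for non-arrays the klass
// gap is zeroed instead when compressed oops are in use.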
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  if (UseBiasedLocking && !len->is_valid()) {
    assert_different_registers(obj, klass, len, t1, t2);
    movptr(t1, Address(klass, Klass::prototype_header_offset()));
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
  } else {
    // This assumes that all prototype bits fit in an int32_t
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), (int32_t)(intptr_t)markOopDesc::prototype());
  }
#ifdef _LP64
  if (UseCompressedOops) { // Take care not to kill klass
    movptr(t1, klass);
    encode_heap_oop_not_null(t1);
    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
  } else
#endif
  {
    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
  }

  if (len->is_valid()) {
    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
  }
#ifdef _LP64
  else if (UseCompressedOops) {
    xorptr(t1, t1);
    store_klass_gap(obj, t1);
  }
#endif
}

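// Zeroes the object body, i.e. everything after the header. The remaining
// byte count is converted to an index and the fields are cleared from the
// top of the object downwards: one 8-byte store per loop iteration on
// 64-bit, two 4-byte stores per iteration on 32-bit (with an odd trailing
// word handled before the loop).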
// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  Label done;
  assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
  assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
  Register index = len_in_bytes;
  // index is positive and ptr sized
  subptr(index, hdr_size_in_bytes);
  jcc(Assembler::zero, done);
  // initialize topmost word, divide index by 2, check if odd and test if zero
  // note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
  { Label L;
    testptr(index, BytesPerWord - 1);
    jcc(Assembler::zero, L);
    stop("index is not a multiple of BytesPerWord");
    bind(L);
  }
#endif
  xorptr(t1, t1);    // use _zero reg to clear memory (shorter code)
  if (UseIncDec) {
    shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
  } else {
    shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
    shrptr(index, 1);
  }
#ifndef _LP64
  // index could have been not a multiple of 8 (i.e., bit 2 was set)
  { Label even;
    // note: if index was a multiple of 8, then it cannot
    //       be 0 now otherwise it must have been 0 before
    //       => if it is even, we don't need to check for 0 again
    jcc(Assembler::carryClear, even);
    // clear topmost word (no jump needed if conditional assignment would work here)
    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
    // index could be 0 now, need to check again
    jcc(Assembler::zero, done);
    bind(even);
  }
#endif // !_LP64
  // initialize remaining object fields: index is a multiple of 2 now
  { Label loop;
    bind(loop);
    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
    NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);)
    decrement(index);
    jcc(Assembler::notZero, loop);
  }

  // done
  bind(done);
}

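// Allocates a fixed-size instance: object_size is given in words. The fast
// allocation path is tried first (branching to slow_case on failure) and the
// new object is then initialized via initialize_object.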
void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, t1, t2); // XXX really?
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  try_allocate(obj, noreg, object_size * BytesPerWord, t1, t2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2);
}

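// Writes the header and clears the rest of the allocated space. For small
// constant sizes (up to threshold) the fields are cleared with individual
// stores; larger constant sizes use an inline clearing loop, and variable
// sizes are handled by initialize_body.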
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

  // clear rest of allocated space
  const Register t1_zero = t1;
  const Register index = t2;
  const int threshold = 6 * BytesPerWord;   // approximate break even point for code size (see comments below)
  if (var_size_in_bytes != noreg) {
    mov(index, var_size_in_bytes);
    initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
  } else if (con_size_in_bytes <= threshold) {
    // use explicit null stores
    // code size = 2 + 3*n bytes (n = number of fields to clear)
    xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
    for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
      movptr(Address(obj, i), t1_zero);
  } else if (con_size_in_bytes > hdr_size_in_bytes) {
    // use loop to null out the fields
    // code size = 16 bytes for even n (n = number of fields to clear)
    // initialize last object field first if odd number of fields
    xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
    movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
    // initialize last object field if constant size is odd
    if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
      movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
    // initialize remaining object fields: index is a multiple of 2
    { Label loop;
      bind(loop);
      movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
             t1_zero);
      NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
             t1_zero);)
      decrement(index);
      jcc(Assembler::notZero, loop);
    }
  }

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}

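// Allocates an array: the length is checked against
// max_array_allocation_length (one unsigned compare also catches negative
// lengths), the size in bytes is computed from the element scale factor and
// aligned up, and the header and body are then initialized.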
void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  cmpptr(len, (int32_t)max_array_allocation_length);
  jcc(Assembler::above, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
  movptr(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  lea(arr_size, Address(arr_size, len, f));
  andptr(arr_size, ~MinObjAlignmentInBytesMask);

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

  // clear rest of allocated space
  const Register len_zero = len;
  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == rax, "must be");
    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}

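// Compares the receiver's klass against the inline cache and jumps to the
// IC miss stub on a mismatch; the receiver register itself is not modified.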
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit NULL check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  int start_offset = offset();

  if (UseCompressedOops) {
    load_klass(rscratch1, receiver);
    cmpptr(rscratch1, iCache);
  } else {
    cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
  }
  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  jump_cc(Assembler::notEqual,
          RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  assert(UseCompressedOops || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}


void C1_MacroAssembler::build_frame(int frame_size_in_bytes) {
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before doing an enter(). This matches the
  // ordering of C2's stack overflow check / rsp decrement and allows
  // the SharedRuntime stack overflow handling to be consistent
  // between the two compilers.
  generate_stack_overflow_check(frame_size_in_bytes);

  push(rbp);
#ifdef TIERED
  // c2 leaves fpu stack dirty. Clean it on entry
  if (UseSSE < 2) {
    empty_FPU_stack();
  }
#endif // TIERED
  decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0
}


void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  increment(rsp, frame_size_in_bytes);  // Does not emit code for frame_size == 0
  pop(rbp);
}

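// Entry point used before the receiver's type has been checked: performs the
// inline cache check, which jumps to the IC miss stub if the receiver does
// not match the cached klass.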
void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
  if (C1Breakpoint) int3();
  inline_cache_check(receiver, ic_klass);
}


void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint) int3();
  // build frame
  verify_FPU(0, "method_entry");
}


#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(rsp, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label not_null;
  testptr(r, r);
  jcc(Assembler::notZero, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}

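// Debug-only helper: fills the selected registers with the marker value
// 0xDEAD so that stale values are easy to spot if they are used by accident.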
void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
#ifdef ASSERT
  if (inv_rax) movptr(rax, 0xDEAD);
  if (inv_rbx) movptr(rbx, 0xDEAD);
  if (inv_rcx) movptr(rcx, 0xDEAD);
  if (inv_rdx) movptr(rdx, 0xDEAD);
  if (inv_rsi) movptr(rsi, 0xDEAD);
  if (inv_rdi) movptr(rdi, 0xDEAD);
#endif
}

#endif // ifndef PRODUCT