29184
|
1 |
/*
|
|
2 |
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
|
|
3 |
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
|
4 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
5 |
*
|
|
6 |
* This code is free software; you can redistribute it and/or modify it
|
|
7 |
* under the terms of the GNU General Public License version 2 only, as
|
|
8 |
* published by the Free Software Foundation.
|
|
9 |
*
|
|
10 |
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
11 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
12 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
13 |
* version 2 for more details (a copy is included in the LICENSE file that
|
|
14 |
* accompanied this code).
|
|
15 |
*
|
|
16 |
* You should have received a copy of the GNU General Public License version
|
|
17 |
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
18 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
19 |
*
|
|
20 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
21 |
* or visit www.oracle.com if you need additional information or have any
|
|
22 |
* questions.
|
|
23 |
*
|
|
24 |
*/
|
|
25 |
|
|
26 |
#include "precompiled.hpp"
|
|
27 |
#include "c1/c1_MacroAssembler.hpp"
|
|
28 |
#include "c1/c1_Runtime1.hpp"
|
|
29 |
#include "classfile/systemDictionary.hpp"
|
|
30 |
#include "gc_interface/collectedHeap.hpp"
|
|
31 |
#include "interpreter/interpreter.hpp"
|
|
32 |
#include "oops/arrayOop.hpp"
|
|
33 |
#include "oops/markOop.hpp"
|
|
34 |
#include "runtime/basicLock.hpp"
|
|
35 |
#include "runtime/biasedLocking.hpp"
|
|
36 |
#include "runtime/os.hpp"
|
|
37 |
#include "runtime/stubRoutines.hpp"
|
|
38 |
|
|
39 |
void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
|
|
40 |
FloatRegister f0, FloatRegister f1,
|
|
41 |
Register result)
|
|
42 |
{
|
|
43 |
Label done;
|
|
44 |
if (is_float) {
|
|
45 |
fcmps(f0, f1);
|
|
46 |
} else {
|
|
47 |
fcmpd(f0, f1);
|
|
48 |
}
|
|
49 |
if (unordered_result < 0) {
|
|
50 |
// we want -1 for unordered or less than, 0 for equal and 1 for
|
|
51 |
// greater than.
|
|
52 |
cset(result, NE); // Not equal or unordered
|
|
53 |
cneg(result, result, LT); // Less than or unordered
|
|
54 |
} else {
|
|
55 |
// we want -1 for less than, 0 for equal and 1 for unordered or
|
|
56 |
// greater than.
|
|
57 |
cset(result, NE); // Not equal or unordered
|
|
58 |
cneg(result, result, LO); // Less than
|
|
59 |
}
|
|
60 |
}
|
|
61 |
|
|
62 |
// Fast-path monitor enter for C1-compiled code.
//
//   hdr      - temp; ends up holding the object's mark word
//   obj      - the object being locked
//   disp_hdr - points at the BasicObjectLock (displaced header slot) in the frame
//   scratch  - extra temp, required only when UseBiasedLocking is on
//   slow_case - branched to when the fast path cannot acquire the lock
//
// Returns the code offset of the first instruction that can fault on a
// NULL obj, for the implicit-null-check table.
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
  const int aligned_mask = BytesPerWord -1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done, fail;
  int null_check_offset = -1;

  verify_oop(obj);

  // save object being locked into the BasicObjectLock
  str(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

  if (UseBiasedLocking) {
    assert(scratch != noreg, "should have scratch register at this point");
    // biased_locking_enter reports its own null-check offset and may
    // branch straight to done (bias held) or slow_case.
    null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
  } else {
    // The first faulting instruction is the mark-word load below.
    null_check_offset = offset();
  }

  // Load object header
  ldr(hdr, Address(obj, hdr_offset));
  // and mark it as unlocked
  orr(hdr, hdr, markOopDesc::unlocked_value);
  // save unlocked object header into the displaced header location on the stack
  str(hdr, Address(disp_hdr, 0));
  // test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header - if it is not the same, get the
  // object header instead
  lea(rscratch2, Address(obj, hdr_offset));
  cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, done, /*fallthough*/NULL);
  // if the object header was the same, we're done
  // if the object header was not the same, it is now in the hdr register
  // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & aligned_mask) == 0
  // 2) sp <= hdr
  // 3) hdr <= sp + page_size
  //
  // these 3 tests can be done by evaluating the following expression:
  //
  // (hdr - sp) & (aligned_mask - page_size)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
  mov(rscratch1, sp);
  sub(hdr, hdr, rscratch1);
  ands(hdr, hdr, aligned_mask - os::vm_page_size());
  // for recursive locking, the result is zero => save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking)
  str(hdr, Address(disp_hdr, 0));
  // otherwise we don't care about the result and handle locking via runtime call
  cbnz(hdr, slow_case);
  // done
  bind(done);
  if (PrintBiasedLockingStatistics) {
    // Bump the global fast-path-entry counter (debug statistics only).
    lea(rscratch2, ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
    addmw(Address(rscratch2, 0), 1, rscratch1);
  }
  return null_check_offset;
}
|
|
122 |
|
|
123 |
|
|
124 |
// Fast-path monitor exit, mirroring lock_object above.
//
//   hdr      - temp; receives the displaced header
//   obj      - reloaded from the BasicObjectLock
//   disp_hdr - points at the BasicObjectLock in the frame
//   slow_case - branched to when the CAS restore fails
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord -1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
  Label done;

  if (UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    // If the object is biased to the current thread there is nothing to
    // restore; biased_locking_exit branches to done in that case.
    biased_locking_exit(obj, hdr, done);
  }

  // load displaced header
  ldr(hdr, Address(disp_hdr, 0));
  // if the loaded hdr is NULL we had recursive locking
  // if we had recursive locking, we are done
  cbz(hdr, done);
  if (!UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }
  verify_oop(obj);
  // test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object - if the object header is not pointing to
  // the displaced header, get the object header instead
  // if the object header was not pointing to the displaced header,
  // we do unlocking via runtime call
  if (hdr_offset) {
    lea(rscratch1, Address(obj, hdr_offset));
    cmpxchgptr(disp_hdr, hdr, rscratch1, rscratch2, done, &slow_case);
  } else {
    // mark word is at offset 0, so obj itself is the CAS address
    cmpxchgptr(disp_hdr, hdr, obj, rscratch2, done, &slow_case);
  }
  // done
  bind(done);
}
|
|
160 |
|
|
161 |
|
|
162 |
// Defines obj, preserves var_size_in_bytes
|
|
163 |
void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, Label& slow_case) {
|
|
164 |
if (UseTLAB) {
|
|
165 |
tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
|
|
166 |
} else {
|
|
167 |
eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
|
|
168 |
incr_allocated_bytes(noreg, var_size_in_bytes, con_size_in_bytes, t1);
|
|
169 |
}
|
|
170 |
}
|
|
171 |
|
|
172 |
// Write the header of a freshly allocated object: mark word, klass
// pointer (compressed or full) and, for arrays, the length field.
//
//   len - element count register for arrays; pass an invalid register
//         (noreg) for plain objects
//   t1, t2 - temps; t1 is clobbered, t2 only used via encode_klass_not_null
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  if (UseBiasedLocking && !len->is_valid()) {
    // Plain object under biased locking: install the klass's prototype
    // header so new instances can be biased.
    assert_different_registers(obj, klass, len, t1, t2);
    ldr(t1, Address(klass, Klass::prototype_header_offset()));
  } else {
    // This assumes that all prototype bits fit in an int32_t
    mov(t1, (int32_t)(intptr_t)markOopDesc::prototype());
  }
  str(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseCompressedClassPointers) { // Take care not to kill klass
    encode_klass_not_null(t1, klass);
    strw(t1, Address(obj, oopDesc::klass_offset_in_bytes()));
  } else {
    str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
  }

  if (len->is_valid()) {
    strw(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  } else if (UseCompressedClassPointers) {
    // Zero the 32-bit gap left after the narrow klass field.
    store_klass_gap(obj, zr);
  }
}
|
|
196 |
|
|
197 |
// Zero words; len is in bytes
// Destroys all registers except addr
// len must be a nonzero multiple of wordSize
//
// Emits an 8-way unrolled store loop with a computed entry point
// (Duff's-device style) so the 'len % 8' leftover words are handled by
// jumping into the middle of the unrolled body instead of a separate
// trailer loop.
void C1_MacroAssembler::zero_memory(Register addr, Register len, Register t1) {
  assert_different_registers(addr, len, t1, rscratch1, rscratch2);

#ifdef ASSERT
  { Label L;
    tst(len, BytesPerWord - 1);
    br(Assembler::EQ, L);
    stop("len is not a multiple of BytesPerWord");
    bind(L);
  }
#endif

#ifndef PRODUCT
  block_comment("zero memory");
#endif

  Label loop;
  Label entry;

  // Algorithm:
  //
  //  scratch1 = cnt & 7;
  //  cnt -= scratch1;
  //  p += scratch1;
  //  switch (scratch1) {
  //    do {
  //      cnt -= 8;
  //        p[-8] = 0;
  //    case 7:
  //        p[-7] = 0;
  //    case 6:
  //        p[-6] = 0;
  //        // ...
  //    case 1:
  //        p[-1] = 0;
  //    case 0:
  //        p += 8;
  //     } while (cnt);
  //  }

  const int unroll = 8; // Number of str(zr) instructions we'll unroll

  lsr(len, len, LogBytesPerWord);           // convert byte count to word count
  andr(rscratch1, len, unroll - 1);  // tmp1 = cnt % unroll
  sub(len, len, rscratch1);      // cnt -= unroll
  // t1 always points to the end of the region we're about to zero
  add(t1, addr, rscratch1, Assembler::LSL, LogBytesPerWord);
  // Back up 'remainder' instructions from 'entry' (each str(zr, ...) is
  // one 4-byte instruction, hence the LSL 2) and branch there.
  adr(rscratch2, entry);
  sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
  br(rscratch2);
  bind(loop);
  sub(len, len, unroll);
  for (int i = -unroll; i < 0; i++)
    str(zr, Address(t1, i * wordSize));
  bind(entry);
  add(t1, t1, unroll * wordSize);
  cbnz(len, loop);
}
|
|
258 |
|
|
259 |
// preserves obj, destroys len_in_bytes
//
// Zero an object's body, i.e. everything after the first
// hdr_size_in_bytes bytes. len_in_bytes is the total object size and
// is consumed as the zeroing index. Emits nothing past the size check
// when the object is all header.
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  Label done;
  assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
  assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
  Register index = len_in_bytes;
  // index is positive and ptr sized
  subs(index, index, hdr_size_in_bytes);   // subs sets flags for the EQ test below
  br(Assembler::EQ, done);
  // note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
  { Label L;
    tst(index, BytesPerWord - 1);
    br(Assembler::EQ, L);
    stop("index is not a multiple of BytesPerWord");
    bind(L);
  }
#endif

  // Preserve obj
  // (zero_memory keeps its addr argument intact, so bias obj to the
  // body start, zero, then restore it.)
  if (hdr_size_in_bytes)
    add(obj, obj, hdr_size_in_bytes);
  zero_memory(obj, index, t1);
  if (hdr_size_in_bytes)
    sub(obj, obj, hdr_size_in_bytes);

  // done
  bind(done);
}
|
|
288 |
|
|
289 |
|
|
290 |
// Allocate a fixed-size instance: grab the storage, then lay down the
// header and a zeroed body.
//
//   header_size, object_size - sizes in words; object_size is a
//                              compile-time constant for this path
//   klass                    - klass to install in the new object's header
//   slow_case                - branched to when fast allocation fails
void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2, int header_size, int object_size, Register klass, Label& slow_case) {
  assert_different_registers(obj, t1, t2); // XXX really?
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");

  const int size_in_bytes = object_size * BytesPerWord;
  try_allocate(obj, noreg, size_in_bytes, t1, t2, slow_case);

  initialize_object(obj, klass, noreg, object_size * HeapWordSize, t1, t2);
}
|
|
298 |
|
|
299 |
// Initialize a freshly allocated instance: write the header, zero the
// body, then publish. Picks one of three zeroing strategies:
//   - runtime-variable size: generic initialize_body loop
//   - small constant size:   straight-line str/stp null stores
//   - large constant size:   8-way unrolled loop with computed entry
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

  // clear rest of allocated space
  const Register index = t2;
  const int threshold = 16 * BytesPerWord;   // approximate break even point for code size (see comments below)
  if (var_size_in_bytes != noreg) {
    mov(index, var_size_in_bytes);
    initialize_body(obj, index, hdr_size_in_bytes, t1);
  } else if (con_size_in_bytes <= threshold) {
    // use explicit null stores
    // When the body has an odd number of words, emit one single str
    // first so the rest can be covered by paired stp stores.
    int i = hdr_size_in_bytes;
    if (i < con_size_in_bytes && (con_size_in_bytes % (2 * BytesPerWord))) {
      str(zr, Address(obj, i));
      i += BytesPerWord;
    }
    for (; i < con_size_in_bytes; i += 2 * BytesPerWord)
      stp(zr, zr, Address(obj, i));
  } else if (con_size_in_bytes > hdr_size_in_bytes) {
    block_comment("zero memory");
    // use loop to null out the fields

    int words = (con_size_in_bytes - hdr_size_in_bytes) / BytesPerWord;
    mov(index, words / 8);

    const int unroll = 8; // Number of str(zr) instructions we'll unroll
    int remainder = words % unroll;
    lea(rscratch1, Address(obj, hdr_size_in_bytes + remainder * BytesPerWord));

    // Enter the unrolled loop at the store that handles the first
    // leftover word; entry_point is bound inside the emission loop
    // below at the matching position.
    Label entry_point, loop;
    b(entry_point);

    bind(loop);
    sub(index, index, 1);
    for (int i = -unroll; i < 0; i++) {
      if (-i == remainder)
        bind(entry_point);
      str(zr, Address(rscratch1, i * wordSize));
    }
    if (remainder == 0)
      bind(entry_point);
    add(rscratch1, rscratch1, unroll * wordSize);
    cbnz(index, loop);

  }

  // Keep the initializing stores ahead of any subsequent store that
  // publishes the new object to other threads.
  membar(StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == r0, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}
|
|
358 |
// Allocate and initialize an array.
//
//   len         - element count (32-bit, used via uxtw)
//   header_size - array header size in words
//   f           - shift applied to len when computing the byte size,
//                 i.e. log2 of the element size
//   slow_case   - branched to on negative/oversized length or
//                 allocation failure
void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, int f, Register klass, Label& slow_case) {
  assert_different_registers(obj, len, t1, t2, klass);

  // determine alignment mask
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // check for negative or excessive length
  // (unsigned HS compare: a negative len appears as a huge unsigned value)
  mov(rscratch1, (int32_t)max_array_allocation_length);
  cmp(len, rscratch1);
  br(Assembler::HS, slow_case);

  const Register arr_size = t2; // okay to be the same
  // align object end
  mov(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
  add(arr_size, arr_size, len, ext::uxtw, f);
  andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

  try_allocate(obj, arr_size, 0, t1, t2, slow_case);

  initialize_header(obj, klass, len, t1, t2);

  // clear rest of allocated space
  const Register len_zero = len;
  initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);

  // Keep the initializing stores ahead of any subsequent store that
  // publishes the new array to other threads.
  membar(StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == r0, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  verify_oop(obj);
}
|
|
392 |
|
|
393 |
|
|
394 |
// Compare the receiver's klass against the inline-cache klass held in
// iCache; the caller emits the branch on the resulting flags.
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  verify_oop(receiver);
  // explicit NULL check not needed since load from [klass_offset] causes a trap
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");

  cmp_klass(receiver, iCache, rscratch1);
}
|
|
402 |
|
|
403 |
|
|
404 |
// Build the activation frame for a C1-compiled method.
//
//   framesize          - frame size in bytes, excluding the saved
//                        fp/lr pair (hence the + 2 * wordSize below)
//   bang_size_in_bytes - how far to bang the stack; at least framesize
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
  // If we have to make this method not-entrant we'll overwrite its
  // first instruction with a jump.  For this action to be legal we
  // must ensure that this first instruction is a B, BL, NOP, BKPT,
  // SVC, HVC, or SMC.  Make it a NOP.
  nop();
  assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before doing an enter().
  generate_stack_overflow_check(bang_size_in_bytes);
  MacroAssembler::build_frame(framesize + 2 * wordSize);
  if (NotifySimulator) {
    notify(Assembler::method_entry);
  }
}
|
|
419 |
|
|
420 |
// Tear down the activation frame built by build_frame above; the
// extra 2 * wordSize matches the adjustment made there.
void C1_MacroAssembler::remove_frame(int framesize) {
  MacroAssembler::remove_frame(framesize + 2 * wordSize);
  if (NotifySimulator) {
    notify(Assembler::method_reentry);
  }
}
|
|
426 |
|
|
427 |
|
|
428 |
// Intentionally empty: this port emits no code at the verified entry
// point.
void C1_MacroAssembler::verified_entry() {
}
|
|
430 |
|
|
431 |
#ifndef PRODUCT
|
|
432 |
|
|
433 |
void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
|
|
434 |
if (!VerifyOops) return;
|
|
435 |
verify_oop_addr(Address(sp, stack_offset), "oop");
|
|
436 |
}
|
|
437 |
|
|
438 |
// Check that r holds a non-NULL, well-formed oop; stops the VM
// otherwise. No-op unless VerifyOops is enabled.
void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  Label ok;
  cbnz(r, ok);
  stop("non-null oop required");
  bind(ok);
  verify_oop(r);
}
|
|
446 |
|
|
447 |
// Debug-build helper: poison the requested registers so that stale
// values left in them are caught quickly. r2 receives a running
// counter rather than the fixed 0xDEAD pattern, presumably so
// successive invalidations are distinguishable — confirm before
// relying on it.
void C1_MacroAssembler::invalidate_registers(bool inv_r0, bool inv_r19, bool inv_r2, bool inv_r3, bool inv_r4, bool inv_r5) {
#ifdef ASSERT
  static int counter;
  if (inv_r0)  mov(r0,  0xDEAD);
  if (inv_r19) mov(r19, 0xDEAD);
  if (inv_r2)  mov(r2,  counter++);
  if (inv_r3)  mov(r3,  0xDEAD);
  if (inv_r4)  mov(r4,  0xDEAD);
  if (inv_r5)  mov(r5,  0xDEAD);
#endif
}
|
|
458 |
#endif // ifndef PRODUCT
|