author       vlivanov
date         Fri, 06 May 2016 18:20:50 +0300
changeset    38241   32eab2eb41fd
parent       38144   0976c0c5c5d3
parent       38237   d972e3a2df53
child        38304   6c8815222785
permissions  -rw-r--r--
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}

Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  //          op codes such as ld or ldx, only access disp() to get
  //          their simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}

static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will work out
// OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return inst & ~m | v;
}

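// Worked example (illustrative, not from the original sources): retargeting a
// BPcc branch (bp_op2, 19-bit displacement field) located at inst_pos 0x100 so
// that it reaches dest_pos 0x180.  The word displacement is
// (0x180 - 0x100) >> 2 = 0x20, so wdisp(dest_pos, inst_pos, 19) supplies 0x20
// for the low 19 bits, while the mask built from word_aligned_ones covers only
// that field; 'inst & ~m | v' therefore preserves the opcode, condition, annul
// and prediction bits of the original instruction.
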
// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30);  break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  r = inv_wdisp(  inst, pos, 19);  break;
      case bp_op2:   r = inv_wdisp(  inst, pos, 19);  break;
      case fb_op2:   r = inv_wdisp(  inst, pos, 22);  break;
      case br_op2:   r = inv_wdisp(  inst, pos, 22);  break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

// Ring buffer jumps

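// The TraceJumps instrumentation in jmp2(), jmp() and jumpl() below records one
// four-word entry per taken jump in a per-thread ring buffer:
//   word 0: jump target        word 1: pc near the jump site
//   word 2: __FILE__ string    word 3: __LINE__
// The ring index is scaled by exact_log2(4*sizeof(intptr_t)) to address an
// entry and is wrapped with 'and3(O0, JavaThread::jump_ring_buffer_size - 1, O0)',
// which assumes the buffer size is a power of two.
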
void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}
void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}

// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
    add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a.base(), a.disp(), d);
#endif /* PRODUCT */
  } else {
    jmpl(a.base(), a.disp(), d);
  }
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}


// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}

// Write serialization page so VM thread can do a pseudo remote membar
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}
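// Worked example (illustrative): with a 4K page, os::vm_page_size() - sizeof(int)
// is 0x0ffc, so tmp2 ends up as (thread >> get_serialize_page_shift_count()) & 0x0ffc,
// an int-aligned, thread-specific offset into the serialization page.  Spreading
// threads across distinct offsets is what the comment above means by minimizing
// cache line collisions.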



void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return Thread::current();
}
#else
#define reinitialize_thread Thread::current
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif

// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);                // to avoid clobbering O0
  mov(G1, L0);                  // avoid clobbering G1
  mov(G5_method, L1);           // avoid clobbering G5
  mov(G3, L2);                  // avoid clobbering G3 also
  mov(G4, L5);                  // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}

static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = Thread::current();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);                // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);                // avoid clobbering G3
    mov(G4, L4);                // avoid clobbering G4
    mov(G5_method, L5);         // avoid clobbering G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1,32,L0);
    srlx(G4,32,L6);
#endif
    call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);                // Restore G1
    // G2 restored below
    mov(L3, G3);                // restore G3
    mov(L4, G4);                // restore G4
    mov(L5, G5_method);         // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0,32,G2);             // Move old high G1 bits high in G2
    srl(G1, 0,G1);              // Clear current high G1 bits
    or3 (G1,G2,G1);             // Recover 64-bit G1
    sllx(L6,32,G2);             // Move old high G4 bits high in G2
    srl(G4, 0,G4);              // Clear current high G4 bits
    or3 (G4,G2,G4);             // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}


void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}


void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}


// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp is visible
  // has_last_Java_frame is true and users will look at the rest of the fields.
  // (Note: flags should always be zero before we get here so doesn't need to be set.)

#ifdef ASSERT
  // Verify that flags was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed() -> restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's last_Java_pc
  // will always be set to NULL. It is set here so that if we are doing a call to
  // native (not VM) that we capture the known pc and don't have to rely on the
  // native call having a standard frame linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add( last_java_sp, STACK_BIAS, G4_scratch );
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
  st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}

void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}


void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}

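// Note on the call_VM protocol used above and in the overloads below:
// G2_thread is always passed as the first C argument in O0 (in the delay slot,
// or early when VerifyThread is about to smash G2), explicit Java-level
// arguments go in O1..O3, the frame anchor is bracketed by
// set_last_Java_frame()/reset_last_Java_frame(), and pending exceptions are
// forwarded via check_and_forward_exception(Gtemp) when check_exceptions is set.
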
void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}


void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}


void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}



// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}


void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}



void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}


void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}


void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(    vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}


void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}


// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
    // Check that we are not overwriting any other oop.
    save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
    ld_ptr(vm_result_addr, L0);
    tst(L0);
    restore();
    breakpoint_trap(notZero, Assembler::ptr_cc);
    // }
# endif

  st_ptr(oop_result, vm_result_addr);
}


void MacroAssembler::ic_call(address entry, bool emit_delay, jint method_index) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc(), method_index);
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}

void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}

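// Worked example (illustrative, assuming the usual 512-byte cards, i.e.
// CardTableModRefBS::card_shift == 9): an object at address 0x20001234 maps to
// card index 0x20001234 >> 9 = 0x100009, so card_table_write() above stores a
// zero byte at byte_map_base + 0x100009 to mark that card dirty.
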
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
#else
  Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}


void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}


void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}


int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00 ) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
#else
  return 1;
#endif
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}


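// Worked example (illustrative): for a 64-bit value such as 0x456789A000
// (msb32 = 0x45, lsb32 = 0x6789A000) internal_sethi() above emits sethi + or3
// for the high word, then sllx/or3 twice to shuffle in the middle bits, and a
// final sllx that leaves the low 10 bits zero for a later 'add' -- seven
// instructions, which is exactly the worst case reported by
// insts_for_sethi(a, true).
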
// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}


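// Worked examples (illustrative) for the set() family above: set(0x123, d) fits
// in a signed 13-bit immediate and becomes a single 'or3(G0, 0x123, d)';
// set(0x7fffc00, d) satisfies inv_hi22(hi22(v)) == v, so one sethi suffices;
// any other non-relocatable value falls through to internal_sethi() plus, when
// low10() is non-zero, a final 'add' of the low 10 bits.
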
void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  int bits_33to2 = (int)((value >> 2) & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if ((hi >> 2) == 0) {
    Assembler::sethi(bits_33to2, d);  // hardware version zero-extends to upper 32
    sllx(d, 2, d);
    if (low12(lo) != 0)
      or3(d, low12(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d);   // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);   // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3 (tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3 (  d, low10(lo),   d);
    sllx(tmp, 32, tmp);
    or3 (d, tmp, d);
  }
}

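// Worked examples (illustrative): set64(0x00000000abcd1234LL, d, tmp) hits the
// 'hi == 0' case: one sethi for the upper 22 bits of the low word plus an or3
// for low10(0xabcd1234) = 0x234 -- two instructions.  A full 64-bit pattern
// such as 0x123456789abcdef0 needs the general sequence sethi(hi,tmp);
// sethi(lo,d); or3; or3; sllx(tmp,32,tmp); or3(d,tmp,d) -- six instructions,
// the counts that insts_for_set64() below reproduces.
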
int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1) ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}


// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}


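// Worked example (illustrative, taking frame::memory_parameter_word_sp_offset
// as 23 purely for the arithmetic): save_frame(4) computes nWords = 23 + 4 = 27,
// rounds up to 28 for double-word alignment, and in an LP64 build issues
// 'save %sp, -224, %sp' since 28 * 8 = 224 fits comfortably in a simm13.
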
void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}


AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}


AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}

void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);

}

void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = Klass::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);

}

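// Both set_narrow_oop() and set_narrow_klass() above deliberately emit a raw
// sethi (via emit_int32) followed by a plain add with no relocation on the add:
// the special-format relocation recorded before the sethi lets the patching
// code in relocInfo_sparc rewrite both halves of the pair, so the instruction
// shape must stay fixed.
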
void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) nop();
}

void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print( "fp = "   ); os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print( "sp = "   ); os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ;  last+1 < 32;  ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if ( j != last )  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ;  last+1 < 32;  ++last ) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last+1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if ( j != last )  s->print(" - d%d", last);
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}

void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}

void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}


// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}

void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;       // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP,-8*8,SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8,SP);
  BLOCK_COMMENT("} verify_oop");
}

void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char * file, int line) { |
|
1313 |
// plausibility check for oops |
|
1314 |
if (!VerifyOops) return; |
|
1315 |
||
16368
713209c45a82
8008555: Debugging code in compiled method sometimes leaks memory
roland
parents:
15482
diff
changeset
|
1316 |
const char* real_msg = NULL; |
713209c45a82
8008555: Debugging code in compiled method sometimes leaks memory
roland
parents:
15482
diff
changeset
|
1317 |
{ |
713209c45a82
8008555: Debugging code in compiled method sometimes leaks memory
roland
parents:
15482
diff
changeset
|
1318 |
ResourceMark rm; |
713209c45a82
8008555: Debugging code in compiled method sometimes leaks memory
roland
parents:
15482
diff
changeset
|
1319 |
stringStream ss; |
713209c45a82
8008555: Debugging code in compiled method sometimes leaks memory
roland
parents:
15482
diff
changeset
|
1320 |
ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line); |
713209c45a82
8008555: Debugging code in compiled method sometimes leaks memory
roland
parents:
15482
diff
changeset
|
1321 |
real_msg = code_string(ss.as_string()); |
713209c45a82
8008555: Debugging code in compiled method sometimes leaks memory
roland
parents:
15482
diff
changeset
|
1322 |
} |
14631 | 1323 |
|
1324 |
// Call indirectly to solve generation ordering problem |
|
1325 |
AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address()); |
|
1326 |
||
1327 |
// Make some space on stack above the current register window. |
|
1328 |
// Enough to hold 8 64-bit registers. |
|
1329 |
add(SP,-8*8,SP); |
|
1330 |
||
1331 |
// Save some 64-bit registers; a normal 'save' chops the heads off |
|
1332 |
// of 64-bit longs in the 32-bit build. |
|
1333 |
stx(O0,SP,frame::register_save_words*wordSize+STACK_BIAS+0*8); |
|
1334 |
stx(O1,SP,frame::register_save_words*wordSize+STACK_BIAS+1*8); |
|
1335 |
ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed |
|
1336 |
stx(O7,SP,frame::register_save_words*wordSize+STACK_BIAS+7*8); |
|
1337 |
||
1338 |
// Size of set() should stay the same |
|
1339 |
patchable_set((intptr_t)real_msg, O1); |
|
1340 |
// Load address to call to into O7 |
|
1341 |
load_ptr_contents(a, O7); |
|
1342 |
// Register call to verify_oop_subroutine |
|
1343 |
callr(O7, G0); |
|
1344 |
delayed()->nop(); |
|
1345 |
// recover frame size |
|
1346 |
add(SP, 8*8,SP); |
|
1347 |
} |
|
1348 |
||
1349 |
// side-door communication with signalHandler in os_solaris.cpp |
|
1350 |
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL }; |
|
1351 |
||
1352 |
// This macro is expanded just once; it creates shared code. Contract: |
|
1353 |
// receives an oop in O0. Must restore O0 & O7 from TLS. Must not smash ANY |
|
1354 |
// registers, including flags. May not use a register 'save', as this blows |
|
1355 |
// the high bits of the O-regs if they contain Long values. Acts as a 'leaf' |
|
1356 |
// call. |
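// (Added clarification, an assumption based on the SPARC calling convention:) a register- |
// window 'save' is avoided because in the 32-bit build only the low 32 bits of the O-regs |
// survive the window spill/fill, so 64-bit longs held in O-regs would be truncated. |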
|
1357 |
void MacroAssembler::verify_oop_subroutine() { |
|
1358 |
// Leaf call; no frame. |
|
1359 |
Label succeed, fail, null_or_fail; |
|
1360 |
||
1361 |
// O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home). |
|
1362 |
// O0 is now the oop to be checked. O7 is the return address. |
|
1363 |
Register O0_obj = O0; |
|
1364 |
||
1365 |
// Save some more registers for temps. |
|
1366 |
stx(O2,SP,frame::register_save_words*wordSize+STACK_BIAS+2*8); |
|
1367 |
stx(O3,SP,frame::register_save_words*wordSize+STACK_BIAS+3*8); |
|
1368 |
stx(O4,SP,frame::register_save_words*wordSize+STACK_BIAS+4*8); |
|
1369 |
stx(O5,SP,frame::register_save_words*wordSize+STACK_BIAS+5*8); |
|
1370 |
||
1371 |
// Save flags |
|
1372 |
Register O5_save_flags = O5; |
|
1373 |
rdccr( O5_save_flags ); |
|
1374 |
||
1375 |
{ // count number of verifies |
|
1376 |
Register O2_adr = O2; |
|
1377 |
Register O3_accum = O3; |
|
1378 |
inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum); |
|
1379 |
} |
|
1380 |
||
1381 |
Register O2_mask = O2; |
|
1382 |
Register O3_bits = O3; |
|
1383 |
Register O4_temp = O4; |
|
1384 |
||
1385 |
// mark lower end of faulting range |
|
1386 |
assert(_verify_oop_implicit_branch[0] == NULL, "set once"); |
|
1387 |
_verify_oop_implicit_branch[0] = pc(); |
|
1388 |
||
1389 |
// We can't check the mark oop because it could be in the process of |
|
1390 |
// locking or unlocking while this is running. |
|
1391 |
set(Universe::verify_oop_mask (), O2_mask); |
|
1392 |
set(Universe::verify_oop_bits (), O3_bits); |
|
1393 |
||
1394 |
// assert((obj & oop_mask) == oop_bits); |
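// (Added note:) verify_oop_mask()/verify_oop_bits() describe a bit pattern, derived from |
// object alignment and the heap layout, that every well-formed oop must match, so this |
// single and-compare filters out most non-oop values before the klass pointer is examined. |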
|
1395 |
and3(O0_obj, O2_mask, O4_temp); |
|
1396 |
cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail); |
|
1397 |
||
1398 |
if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) { |
|
1399 |
// the null_or_fail case is useless; must test for null separately |
|
1400 |
br_null_short(O0_obj, pn, succeed); |
|
1401 |
} |
|
1402 |
||
1403 |
// Check the Klass* of this object for being in the right area of memory. |
|
1404 |
// Cannot do the load in the delay slot above in case O0 is null |
|
1405 |
load_klass(O0_obj, O0_obj); |
|
1406 |
// assert((klass != NULL) |
|
1407 |
br_null_short(O0_obj, pn, fail); |
|
1408 |
||
1409 |
wrccr( O5_save_flags ); // Restore CCR's |
|
1410 |
||
1411 |
// mark upper end of faulting range |
|
1412 |
_verify_oop_implicit_branch[1] = pc(); |
|
1413 |
||
1414 |
//----------------------- |
|
1415 |
// all tests pass |
|
1416 |
bind(succeed); |
|
1417 |
||
1418 |
// Restore prior 64-bit registers |
|
1419 |
ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+0*8,O0); |
|
1420 |
ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+1*8,O1); |
|
1421 |
ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+2*8,O2); |
|
1422 |
ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+3*8,O3); |
|
1423 |
ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+4*8,O4); |
|
1424 |
ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+5*8,O5); |
|
1425 |
||
1426 |
retl(); // Leaf return; restore prior O7 in delay slot |
|
1427 |
delayed()->ldx(SP,frame::register_save_words*wordSize+STACK_BIAS+7*8,O7); |
|
1428 |
||
1429 |
//----------------------- |
|
1430 |
bind(null_or_fail); // nulls are less common but OK |
|
1431 |
br_null(O0_obj, false, pt, succeed); |
|
1432 |
delayed()->wrccr( O5_save_flags ); // Restore CCR's |
|
1433 |
||
1434 |
//----------------------- |
|
1435 |
// report failure: |
|
1436 |
bind(fail); |
|
1437 |
_verify_oop_implicit_branch[2] = pc(); |
|
1438 |
||
1439 |
wrccr( O5_save_flags ); // Restore CCR's |
|
1440 |
||
1441 |
save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2)); |
|
1442 |
||
1443 |
// stop_subroutine expects message pointer in I1. |
|
1444 |
mov(I1, O1); |
|
1445 |
||
1446 |
// Restore prior 64-bit registers |
|
1447 |
ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+0*8,I0); |
|
1448 |
ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+1*8,I1); |
|
1449 |
ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+2*8,I2); |
|
1450 |
ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+3*8,I3); |
|
1451 |
ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+4*8,I4); |
|
1452 |
ldx(FP,frame::register_save_words*wordSize+STACK_BIAS+5*8,I5); |
|
1453 |
||
1454 |
// factor long stop-sequence into subroutine to save space |
|
1455 |
assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet"); |
|
1456 |
||
1457 |
// call indirectly to solve generation ordering problem |
|
1458 |
AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address()); |
|
1459 |
load_ptr_contents(al, O5); |
|
1460 |
jmpl(O5, 0, O7); |
|
1461 |
delayed()->nop(); |
|
1462 |
} |
|
1463 |
||
1464 |
||
1465 |
void MacroAssembler::stop(const char* msg) { |
|
1466 |
// save frame first to get O7 for return address |
|
1467 |
// add one word to size in case struct is odd number of words long |
|
1468 |
// It must be doubleword-aligned for storing doubles into it. |
|
1469 |
||
1470 |
save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2)); |
|
1471 |
||
1472 |
// stop_subroutine expects message pointer in I1. |
|
1473 |
// Size of set() should stay the same |
|
1474 |
patchable_set((intptr_t)msg, O1); |
|
1475 |
||
1476 |
// factor long stop-sequence into subroutine to save space |
|
1477 |
assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet"); |
|
1478 |
||
1479 |
// call indirectly to solve generation ordering problem |
|
1480 |
AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address()); |
|
1481 |
load_ptr_contents(a, O5); |
|
1482 |
jmpl(O5, 0, O7); |
|
1483 |
delayed()->nop(); |
|
1484 |
||
1485 |
breakpoint_trap(); // make stop actually stop rather than writing |
|
1486 |
// unnoticeable results in the output files. |
|
1487 |
||
1488 |
// restore(); done in callee to save space! |
|
1489 |
} |
|
1490 |
||
1491 |
||
1492 |
void MacroAssembler::warn(const char* msg) { |
|
1493 |
save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2)); |
|
1494 |
RegistersForDebugging::save_registers(this); |
|
1495 |
mov(O0, L0); |
|
1496 |
// Size of set() should stay the same |
|
1497 |
patchable_set((intptr_t)msg, O0); |
|
1498 |
call( CAST_FROM_FN_PTR(address, warning) ); |
|
1499 |
delayed()->nop(); |
|
1500 |
// ret(); |
|
1501 |
// delayed()->restore(); |
|
1502 |
RegistersForDebugging::restore_registers(this, L0); |
|
1503 |
restore(); |
|
1504 |
} |
|
1505 |
||
1506 |
||
1507 |
void MacroAssembler::untested(const char* what) { |
|
1508 |
// We must be able to turn interactive prompting off |
|
1509 |
// in order to run automated test scripts on the VM |
|
1510 |
// Use the flag ShowMessageBoxOnError |
|
1511 |
||
1512 |
const char* b = NULL; |
1513 |
{ |
1514 |
ResourceMark rm; |
1515 |
stringStream ss; |
1516 |
ss.print("untested: %s", what); |
1517 |
b = code_string(ss.as_string()); |
1518 |
} |
14631 | 1519 |
if (ShowMessageBoxOnError) { STOP(b); } |
1520 |
else { warn(b); } |
|
1521 |
} |
|
1522 |
||
1523 |
||
1524 |
void MacroAssembler::stop_subroutine() { |
|
1525 |
RegistersForDebugging::save_registers(this); |
|
1526 |
||
1527 |
// for the sake of the debugger, stick a PC on the current frame |
|
1528 |
// (this assumes that the caller has performed an extra "save") |
|
1529 |
mov(I7, L7); |
|
1530 |
add(O7, -7 * BytesPerInt, I7); |
|
1531 |
||
1532 |
save_frame(); // one more save to free up another O7 register |
|
1533 |
mov(I0, O1); // addr of reg save area |
|
1534 |
||
1535 |
// We expect pointer to message in I1. Caller must set it up in O1 |
|
1536 |
mov(I1, O0); // get msg |
|
1537 |
call (CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); |
|
1538 |
delayed()->nop(); |
|
1539 |
||
1540 |
restore(); |
|
1541 |
||
1542 |
RegistersForDebugging::restore_registers(this, O0); |
|
1543 |
||
1544 |
save_frame(0); |
|
1545 |
call(CAST_FROM_FN_PTR(address,breakpoint)); |
|
1546 |
delayed()->nop(); |
|
1547 |
restore(); |
|
1548 |
||
1549 |
mov(L7, I7); |
|
1550 |
retl(); |
|
1551 |
delayed()->restore(); // see stop above |
|
1552 |
} |
|
1553 |
||
1554 |
||
1555 |
void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) { |
|
1556 |
if ( ShowMessageBoxOnError ) { |
|
1557 |
JavaThread* thread = JavaThread::current(); |
|
1558 |
JavaThreadState saved_state = thread->thread_state(); |
|
1559 |
thread->set_thread_state(_thread_in_vm); |
|
1560 |
{ |
|
1561 |
// In order to get locks to work, we need to fake an in_VM state |
|
1562 |
ttyLocker ttyl; |
|
1563 |
::tty->print_cr("EXECUTION STOPPED: %s\n", msg); |
|
1564 |
if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { |
|
1565 |
BytecodeCounter::print(); |
|
1566 |
} |
|
1567 |
if (os::message_box(msg, "Execution stopped, print registers?")) |
|
1568 |
regs->print(::tty); |
|
1569 |
} |
|
1570 |
BREAKPOINT; |
|
1571 |
ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state); |
|
1572 |
} |
|
1573 |
else { |
|
1574 |
::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); |
|
1575 |
} |
|
1576 |
assert(false, "DEBUG MESSAGE: %s", msg); |
14631 | 1577 |
} |
1578 |
||
1579 |
||
1580 |
void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) { |
|
1581 |
subcc( Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words? |
|
1582 |
Label no_extras; |
|
1583 |
br( negative, true, pt, no_extras ); // if neg, clear reg |
|
1584 |
delayed()->set(0, Rresult); // annulled, so only if taken |
|
1585 |
bind( no_extras ); |
|
1586 |
} |
|
1587 |
||
1588 |
||
1589 |
void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) { |
|
1590 |
#ifdef _LP64 |
|
1591 |
add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult); |
|
1592 |
#else |
|
1593 |
add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult); |
|
1594 |
#endif |
|
1595 |
bclr(1, Rresult); |
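// (Added note:) clearing bit 0 keeps the word count even (the +1 in the 32-bit branch above |
// makes this a round-up), so the frame size stays a multiple of 2*wordSize and SP remains |
// doubleword aligned. |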
|
1596 |
sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes |
|
1597 |
} |
|
1598 |
||
1599 |
||
1600 |
void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) { |
|
1601 |
calc_frame_size(Rextra_words, Rresult); |
|
1602 |
neg(Rresult); |
|
1603 |
save(SP, Rresult, SP); |
|
1604 |
} |
|
1605 |
||
1606 |
||
1607 |
// --------------------------------------------------------- |
|
1608 |
Assembler::RCondition cond2rcond(Assembler::Condition c) { |
|
1609 |
switch (c) { |
|
1610 |
/*case zero: */ |
|
1611 |
case Assembler::equal: return Assembler::rc_z; |
|
1612 |
case Assembler::lessEqual: return Assembler::rc_lez; |
|
1613 |
case Assembler::less: return Assembler::rc_lz; |
|
1614 |
/*case notZero:*/ |
|
1615 |
case Assembler::notEqual: return Assembler::rc_nz; |
|
1616 |
case Assembler::greater: return Assembler::rc_gz; |
|
1617 |
case Assembler::greaterEqual: return Assembler::rc_gez; |
|
1618 |
} |
|
1619 |
ShouldNotReachHere(); |
|
1620 |
return Assembler::rc_z; |
|
1621 |
} |
|
1622 |
||
1623 |
// compares (32 bit) register with zero and branches. NOT FOR USE WITH 64-bit POINTERS |
|
1624 |
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) { |
|
1625 |
tst(s1); |
|
1626 |
br (c, a, p, L); |
|
1627 |
} |
|
1628 |
||
1629 |
// Compares a pointer register with zero and branches on null. |
|
1630 |
// Does a test & branch on 32-bit systems and a register-branch on 64-bit. |
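// (Added note:) on 64-bit the bpr form branches directly on the register contents, so the |
// null test does not disturb the integer condition codes; the 32-bit tst/br path does. |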
|
1631 |
void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) { |
|
1632 |
assert_not_delayed(); |
|
1633 |
#ifdef _LP64 |
|
1634 |
bpr( rc_z, a, p, s1, L ); |
|
1635 |
#else |
|
1636 |
tst(s1); |
|
1637 |
br ( zero, a, p, L ); |
|
1638 |
#endif |
|
1639 |
} |
|
1640 |
||
1641 |
void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) { |
|
1642 |
assert_not_delayed(); |
|
1643 |
#ifdef _LP64 |
|
1644 |
bpr( rc_nz, a, p, s1, L ); |
|
1645 |
#else |
|
1646 |
tst(s1); |
|
1647 |
br ( notZero, a, p, L ); |
|
1648 |
#endif |
|
1649 |
} |
|
1650 |
||
1651 |
// Compare registers and branch with nop in delay slot or cbcond without delay slot. |
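// (Added note, an assumption based on the SPARC v9/T4 documentation:) cbcond is a |
// compare-and-branch instruction with no delay slot, but it only reaches nearby targets and |
// only encodes a 5-bit immediate, which is why the helpers below check use_cbcond(L) and |
// is_simm(simm13a,5) before using it and otherwise fall back to cmp + br/brx + nop. |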
|
1652 |
||
1653 |
// Compare integer (32 bit) values (icc only). |
|
1654 |
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c, |
|
1655 |
Predict p, Label& L) { |
|
1656 |
assert_not_delayed(); |
|
1657 |
if (use_cbcond(L)) { |
|
1658 |
Assembler::cbcond(c, icc, s1, s2, L); |
|
1659 |
} else { |
|
1660 |
cmp(s1, s2); |
|
1661 |
br(c, false, p, L); |
|
1662 |
delayed()->nop(); |
|
1663 |
} |
|
1664 |
} |
|
1665 |
||
1666 |
// Compare integer (32 bit) values (icc only). |
|
1667 |
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c, |
|
1668 |
Predict p, Label& L) { |
|
1669 |
assert_not_delayed(); |
|
1670 |
if (is_simm(simm13a,5) && use_cbcond(L)) { |
|
1671 |
Assembler::cbcond(c, icc, s1, simm13a, L); |
|
1672 |
} else { |
|
1673 |
cmp(s1, simm13a); |
|
1674 |
br(c, false, p, L); |
|
1675 |
delayed()->nop(); |
|
1676 |
} |
|
1677 |
} |
|
1678 |
||
1679 |
// Branch that tests xcc in LP64 and icc in !LP64 |
|
1680 |
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c, |
|
1681 |
Predict p, Label& L) { |
|
1682 |
assert_not_delayed(); |
|
1683 |
if (use_cbcond(L)) { |
|
1684 |
Assembler::cbcond(c, ptr_cc, s1, s2, L); |
|
1685 |
} else { |
|
1686 |
cmp(s1, s2); |
|
1687 |
brx(c, false, p, L); |
|
1688 |
delayed()->nop(); |
|
1689 |
} |
|
1690 |
} |
|
1691 |
||
1692 |
// Branch that tests xcc in LP64 and icc in !LP64 |
|
1693 |
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c, |
|
1694 |
Predict p, Label& L) { |
|
1695 |
assert_not_delayed(); |
|
1696 |
if (is_simm(simm13a,5) && use_cbcond(L)) { |
|
1697 |
Assembler::cbcond(c, ptr_cc, s1, simm13a, L); |
|
1698 |
} else { |
|
1699 |
cmp(s1, simm13a); |
|
1700 |
brx(c, false, p, L); |
|
1701 |
delayed()->nop(); |
|
1702 |
} |
|
1703 |
} |
|
1704 |
||
1705 |
// Short branch version for compares a pointer with zero. |
|
1706 |
||
1707 |
void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) { |
|
1708 |
assert_not_delayed(); |
|
1709 |
if (use_cbcond(L)) { |
|
1710 |
Assembler::cbcond(zero, ptr_cc, s1, 0, L); |
|
1711 |
return; |
|
1712 |
} |
|
1713 |
br_null(s1, false, p, L); |
|
1714 |
delayed()->nop(); |
|
1715 |
} |
|
1716 |
||
1717 |
void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) { |
|
1718 |
assert_not_delayed(); |
|
1719 |
if (use_cbcond(L)) { |
|
1720 |
Assembler::cbcond(notZero, ptr_cc, s1, 0, L); |
|
1721 |
return; |
|
1722 |
} |
|
1723 |
br_notnull(s1, false, p, L); |
|
1724 |
delayed()->nop(); |
|
1725 |
} |
|
1726 |
||
1727 |
// Unconditional short branch |
|
1728 |
void MacroAssembler::ba_short(Label& L) { |
|
1729 |
if (use_cbcond(L)) { |
|
1730 |
Assembler::cbcond(equal, icc, G0, G0, L); |
|
1731 |
return; |
|
1732 |
} |
|
1733 |
br(always, false, pt, L); |
|
1734 |
delayed()->nop(); |
|
1735 |
} |
|
1736 |
||
1737 |
// instruction sequences factored across compiler & interpreter |
|
1738 |
||
1739 |
||
1740 |
void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low, |
|
1741 |
Register Rb_hi, Register Rb_low, |
|
1742 |
Register Rresult) { |
|
1743 |
||
1744 |
Label check_low_parts, done; |
|
1745 |
||
1746 |
cmp(Ra_hi, Rb_hi ); // compare hi parts |
|
1747 |
br(equal, true, pt, check_low_parts); |
|
1748 |
delayed()->cmp(Ra_low, Rb_low); // test low parts |
|
1749 |
||
1750 |
// And, with an unsigned comparison, it does not matter if the numbers |
|
1751 |
// are negative or not. |
|
1752 |
// E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff. |
|
1753 |
// The second one is bigger (unsignedly). |
|
1754 |
||
1755 |
// Other notes: The first move in each triplet can be unconditional |
|
1756 |
// (and therefore probably prefetchable). |
|
1757 |
// And the equals case for the high part does not need testing, |
|
1758 |
// since that triplet is reached only after finding the high halves differ. |
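// (Added note:) the overall contract is the usual lcmp result: Rresult ends up -1, 0 or 1 |
// depending on whether (Ra_hi:Ra_low) is less than, equal to, or greater than (Rb_hi:Rb_low). |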
|
1759 |
||
18097 | 1760 |
mov(-1, Rresult); |
1761 |
ba(done); |
|
1762 |
delayed()->movcc(greater, false, icc, 1, Rresult); |
|
1763 |
||
1764 |
bind(check_low_parts); |
|
1765 |
||
1766 |
mov( -1, Rresult); |
|
1767 |
movcc(equal, false, icc, 0, Rresult); |
|
1768 |
movcc(greaterUnsigned, false, icc, 1, Rresult); |
|
1769 |
||
1770 |
bind(done); |
|
14631 | 1771 |
} |
1772 |
||
1773 |
void MacroAssembler::lneg( Register Rhi, Register Rlow ) { |
|
1774 |
subcc( G0, Rlow, Rlow ); |
|
1775 |
subc( G0, Rhi, Rhi ); |
|
1776 |
} |
|
1777 |
||
1778 |
void MacroAssembler::lshl( Register Rin_high, Register Rin_low, |
|
1779 |
Register Rcount, |
|
1780 |
Register Rout_high, Register Rout_low, |
|
1781 |
Register Rtemp ) { |
|
1782 |
||
1783 |
||
1784 |
Register Ralt_count = Rtemp; |
|
1785 |
Register Rxfer_bits = Rtemp; |
|
1786 |
||
1787 |
assert( Ralt_count != Rin_high |
|
1788 |
&& Ralt_count != Rin_low |
|
1789 |
&& Ralt_count != Rcount |
|
1790 |
&& Rxfer_bits != Rin_low |
|
1791 |
&& Rxfer_bits != Rin_high |
|
1792 |
&& Rxfer_bits != Rcount |
|
1793 |
&& Rxfer_bits != Rout_low |
|
1794 |
&& Rout_low != Rin_high, |
|
1795 |
"register alias checks"); |
|
1796 |
||
1797 |
Label big_shift, done; |
|
1798 |
||
1799 |
// This code can be optimized to use the 64 bit shifts in V9. |
|
1800 |
// Here we use the 32 bit shifts. |
|
1801 |
||
1802 |
and3( Rcount, 0x3f, Rcount); // take least significant 6 bits |
|
1803 |
subcc(Rcount, 31, Ralt_count); |
|
1804 |
br(greater, true, pn, big_shift); |
|
1805 |
delayed()->dec(Ralt_count); |
|
1806 |
||
1807 |
// shift < 32 bits, Ralt_count = Rcount-31 |
|
1808 |
||
1809 |
// We get the transfer bits by shifting right by 32-count the low |
|
1810 |
// register. This is done by shifting right by 31-count and then by one |
|
1811 |
// more to take care of the special (rare) case where count is zero |
|
1812 |
// (shifting by 32 would not work). |
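// (Added worked example, not in the original:) for count == 8, Ralt_count becomes 23 after |
// the neg below, so Rxfer_bits = (Rin_low >> 23) >> 1 = Rin_low >> 24, Rout_low = Rin_low << 8, |
// and Rout_high = (Rin_high << 8) | Rxfer_bits. |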
|
1813 |
||
1814 |
neg(Ralt_count); |
|
1815 |
||
1816 |
// The order of the next two instructions is critical in the case where |
|
1817 |
// Rin and Rout are the same and should not be reversed. |
|
1818 |
||
1819 |
srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count |
|
1820 |
if (Rcount != Rout_low) { |
|
1821 |
sll(Rin_low, Rcount, Rout_low); // low half |
|
1822 |
} |
|
1823 |
sll(Rin_high, Rcount, Rout_high); |
|
1824 |
if (Rcount == Rout_low) { |
|
1825 |
sll(Rin_low, Rcount, Rout_low); // low half |
|
1826 |
} |
|
1827 |
srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more |
|
1828 |
ba(done); |
|
1829 |
delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low |
|
1830 |
||
1831 |
// shift >= 32 bits, Ralt_count = Rcount-32 |
|
1832 |
bind(big_shift); |
|
1833 |
sll(Rin_low, Ralt_count, Rout_high ); |
|
1834 |
clr(Rout_low); |
|
1835 |
||
1836 |
bind(done); |
|
1837 |
} |
|
1838 |
||
1839 |
||
1840 |
void MacroAssembler::lshr( Register Rin_high, Register Rin_low, |
|
1841 |
Register Rcount, |
|
1842 |
Register Rout_high, Register Rout_low, |
|
1843 |
Register Rtemp ) { |
|
1844 |
||
1845 |
Register Ralt_count = Rtemp; |
|
1846 |
Register Rxfer_bits = Rtemp; |
|
1847 |
||
1848 |
assert( Ralt_count != Rin_high |
|
1849 |
&& Ralt_count != Rin_low |
|
1850 |
&& Ralt_count != Rcount |
|
1851 |
&& Rxfer_bits != Rin_low |
|
1852 |
&& Rxfer_bits != Rin_high |
|
1853 |
&& Rxfer_bits != Rcount |
|
1854 |
&& Rxfer_bits != Rout_high |
|
1855 |
&& Rout_high != Rin_low, |
|
1856 |
"register alias checks"); |
|
1857 |
||
1858 |
Label big_shift, done; |
|
1859 |
||
1860 |
// This code can be optimized to use the 64 bit shifts in V9. |
|
1861 |
// Here we use the 32 bit shifts. |
|
1862 |
||
1863 |
and3( Rcount, 0x3f, Rcount); // take least significant 6 bits |
|
1864 |
subcc(Rcount, 31, Ralt_count); |
|
1865 |
br(greater, true, pn, big_shift); |
|
1866 |
delayed()->dec(Ralt_count); |
|
1867 |
||
1868 |
// shift < 32 bits, Ralt_count = Rcount-31 |
|
1869 |
||
1870 |
// We get the transfer bits by shifting left by 32-count the high |
|
1871 |
// register. This is done by shifting left by 31-count and then by one |
|
1872 |
// more to take care of the special (rare) case where count is zero |
|
1873 |
// (shifting by 32 would not work). |
|
1874 |
||
1875 |
neg(Ralt_count); |
|
1876 |
if (Rcount != Rout_low) { |
|
1877 |
srl(Rin_low, Rcount, Rout_low); |
|
1878 |
} |
|
1879 |
||
1880 |
// The order of the next two instructions is critical in the case where |
|
1881 |
// Rin and Rout are the same and should not be reversed. |
|
1882 |
||
1883 |
sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count |
|
1884 |
sra(Rin_high, Rcount, Rout_high ); // high half |
|
1885 |
sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more |
|
1886 |
if (Rcount == Rout_low) { |
|
1887 |
srl(Rin_low, Rcount, Rout_low); |
|
1888 |
} |
|
1889 |
ba(done); |
|
1890 |
delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high |
|
1891 |
||
1892 |
// shift >= 32 bits, Ralt_count = Rcount-32 |
|
1893 |
bind(big_shift); |
|
1894 |
||
1895 |
sra(Rin_high, Ralt_count, Rout_low); |
|
1896 |
sra(Rin_high, 31, Rout_high); // sign into hi |
|
1897 |
||
1898 |
bind( done ); |
|
1899 |
} |
|
1900 |
||
1901 |
||
1902 |
||
1903 |
void MacroAssembler::lushr( Register Rin_high, Register Rin_low, |
|
1904 |
Register Rcount, |
|
1905 |
Register Rout_high, Register Rout_low, |
|
1906 |
Register Rtemp ) { |
|
1907 |
||
1908 |
Register Ralt_count = Rtemp; |
|
1909 |
Register Rxfer_bits = Rtemp; |
|
1910 |
||
1911 |
assert( Ralt_count != Rin_high |
|
1912 |
&& Ralt_count != Rin_low |
|
1913 |
&& Ralt_count != Rcount |
|
1914 |
&& Rxfer_bits != Rin_low |
|
1915 |
&& Rxfer_bits != Rin_high |
|
1916 |
&& Rxfer_bits != Rcount |
|
1917 |
&& Rxfer_bits != Rout_high |
|
1918 |
&& Rout_high != Rin_low, |
|
1919 |
"register alias checks"); |
|
1920 |
||
1921 |
Label big_shift, done; |
|
1922 |
||
1923 |
// This code can be optimized to use the 64 bit shifts in V9. |
|
1924 |
// Here we use the 32 bit shifts. |
|
1925 |
||
1926 |
and3( Rcount, 0x3f, Rcount); // take least significant 6 bits |
|
1927 |
subcc(Rcount, 31, Ralt_count); |
|
1928 |
br(greater, true, pn, big_shift); |
|
1929 |
delayed()->dec(Ralt_count); |
|
1930 |
||
1931 |
// shift < 32 bits, Ralt_count = Rcount-31 |
|
1932 |
||
1933 |
// We get the transfer bits by shifting left by 32-count the high |
|
1934 |
// register. This is done by shifting left by 31-count and then by one |
|
1935 |
// more to take care of the special (rare) case where count is zero |
|
1936 |
// (shifting by 32 would not work). |
|
1937 |
||
1938 |
neg(Ralt_count); |
|
1939 |
if (Rcount != Rout_low) { |
|
1940 |
srl(Rin_low, Rcount, Rout_low); |
|
1941 |
} |
|
1942 |
||
1943 |
// The order of the next two instructions is critical in the case where |
|
1944 |
// Rin and Rout are the same and should not be reversed. |
|
1945 |
||
1946 |
sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count |
|
1947 |
srl(Rin_high, Rcount, Rout_high ); // high half |
|
1948 |
sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more |
|
1949 |
if (Rcount == Rout_low) { |
|
1950 |
srl(Rin_low, Rcount, Rout_low); |
|
1951 |
} |
|
1952 |
ba(done); |
|
1953 |
delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high |
|
1954 |
||
1955 |
// shift >= 32 bits, Ralt_count = Rcount-32 |
|
1956 |
bind(big_shift); |
|
1957 |
||
1958 |
srl(Rin_high, Ralt_count, Rout_low); |
|
1959 |
clr(Rout_high); |
|
1960 |
||
1961 |
bind( done ); |
|
1962 |
} |
|
1963 |
||
1964 |
#ifdef _LP64 |
|
1965 |
void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) { |
|
1966 |
cmp(Ra, Rb); |
|
1967 |
mov(-1, Rresult); |
|
1968 |
movcc(equal, false, xcc, 0, Rresult); |
|
1969 |
movcc(greater, false, xcc, 1, Rresult); |
|
1970 |
} |
|
1971 |
#endif |
|
1972 |
||
1973 |
||
1974 |
void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) { |
|
1975 |
switch (size_in_bytes) { |
|
1976 |
case 8: ld_long(src, dst); break; |
|
1977 |
case 4: ld( src, dst); break; |
|
1978 |
case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break; |
|
1979 |
case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break; |
|
1980 |
default: ShouldNotReachHere(); |
|
1981 |
} |
|
1982 |
} |
|
1983 |
||
1984 |
void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { |
|
1985 |
switch (size_in_bytes) { |
|
1986 |
case 8: st_long(src, dst); break; |
|
1987 |
case 4: st( src, dst); break; |
|
1988 |
case 2: sth( src, dst); break; |
|
1989 |
case 1: stb( src, dst); break; |
|
1990 |
default: ShouldNotReachHere(); |
|
1991 |
} |
|
1992 |
} |
|
1993 |
||
1994 |
||
1995 |
void MacroAssembler::float_cmp( bool is_float, int unordered_result, |
|
1996 |
FloatRegister Fa, FloatRegister Fb, |
|
1997 |
Register Rresult) { |
|
18097 | 1998 |
if (is_float) { |
1999 |
fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); |
|
14631 | 2000 |
} else { |
18097 | 2001 |
fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); |
14631 | 2002 |
} |
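// (Added note:) unordered_result selects the Java semantics for NaN operands: +1 mimics |
// fcmpg (unordered compares as greater), anything else mimics fcmpl (unordered compares as |
// less, i.e. the mov(-1) default below is left in place). |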
18097 | 2003 |
|
2004 |
if (unordered_result == 1) { |
|
2005 |
mov( -1, Rresult); |
|
2006 |
movcc(f_equal, true, fcc0, 0, Rresult); |
|
2007 |
movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); |
|
14631 | 2008 |
} else { |
18097 | 2009 |
mov( -1, Rresult); |
2010 |
movcc(f_equal, true, fcc0, 0, Rresult); |
|
2011 |
movcc(f_greater, true, fcc0, 1, Rresult); |
|
14631 | 2012 |
} |
2013 |
} |
|
2014 |
||
2015 |
||
2016 |
void MacroAssembler::save_all_globals_into_locals() { |
|
2017 |
mov(G1,L1); |
|
2018 |
mov(G2,L2); |
|
2019 |
mov(G3,L3); |
|
2020 |
mov(G4,L4); |
|
2021 |
mov(G5,L5); |
|
2022 |
mov(G6,L6); |
|
2023 |
mov(G7,L7); |
|
2024 |
} |
|
2025 |
||
2026 |
void MacroAssembler::restore_globals_from_locals() { |
|
2027 |
mov(L1,G1); |
|
2028 |
mov(L2,G2); |
|
2029 |
mov(L3,G3); |
|
2030 |
mov(L4,G4); |
|
2031 |
mov(L5,G5); |
|
2032 |
mov(L6,G6); |
|
2033 |
mov(L7,G7); |
|
2034 |
} |
|
2035 |
||
2036 |
RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, |
|
2037 |
Register tmp, |
|
2038 |
int offset) { |
|
2039 |
intptr_t value = *delayed_value_addr; |
|
2040 |
if (value != 0) |
|
2041 |
return RegisterOrConstant(value + offset); |
|
2042 |
||
2043 |
// load indirectly to solve generation ordering problem |
|
2044 |
AddressLiteral a(delayed_value_addr); |
|
2045 |
load_ptr_contents(a, tmp); |
|
2046 |
||
2047 |
#ifdef ASSERT |
|
2048 |
tst(tmp); |
|
2049 |
breakpoint_trap(zero, xcc); |
|
2050 |
#endif |
|
2051 |
||
2052 |
if (offset != 0) |
|
2053 |
add(tmp, offset, tmp); |
|
2054 |
||
2055 |
return RegisterOrConstant(tmp); |
|
2056 |
} |
|
2057 |
||
2058 |
||
2059 |
RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { |
|
2060 |
assert(d.register_or_noreg() != G0, "lost side effect"); |
|
2061 |
if ((s2.is_constant() && s2.as_constant() == 0) || |
|
2062 |
(s2.is_register() && s2.as_register() == G0)) { |
|
2063 |
// Do nothing, just move value. |
|
2064 |
if (s1.is_register()) { |
|
2065 |
if (d.is_constant()) d = temp; |
|
2066 |
mov(s1.as_register(), d.as_register()); |
|
2067 |
return d; |
|
2068 |
} else { |
|
2069 |
return s1; |
|
2070 |
} |
|
2071 |
} |
|
2072 |
||
2073 |
if (s1.is_register()) { |
|
2074 |
assert_different_registers(s1.as_register(), temp); |
|
2075 |
if (d.is_constant()) d = temp; |
|
2076 |
andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); |
|
2077 |
return d; |
|
2078 |
} else { |
|
2079 |
if (s2.is_register()) { |
|
2080 |
assert_different_registers(s2.as_register(), temp); |
|
2081 |
if (d.is_constant()) d = temp; |
|
2082 |
set(s1.as_constant(), temp); |
|
2083 |
andn(temp, s2.as_register(), d.as_register()); |
|
2084 |
return d; |
|
2085 |
} else { |
|
2086 |
intptr_t res = s1.as_constant() & ~s2.as_constant(); |
|
2087 |
return res; |
|
2088 |
} |
|
2089 |
} |
|
2090 |
} |
|
2091 |
||
2092 |
RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { |
|
2093 |
assert(d.register_or_noreg() != G0, "lost side effect"); |
|
2094 |
if ((s2.is_constant() && s2.as_constant() == 0) || |
|
2095 |
(s2.is_register() && s2.as_register() == G0)) { |
|
2096 |
// Do nothing, just move value. |
|
2097 |
if (s1.is_register()) { |
|
2098 |
if (d.is_constant()) d = temp; |
|
2099 |
mov(s1.as_register(), d.as_register()); |
|
2100 |
return d; |
|
2101 |
} else { |
|
2102 |
return s1; |
|
2103 |
} |
|
2104 |
} |
|
2105 |
||
2106 |
if (s1.is_register()) { |
|
2107 |
assert_different_registers(s1.as_register(), temp); |
|
2108 |
if (d.is_constant()) d = temp; |
|
2109 |
add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); |
|
2110 |
return d; |
|
2111 |
} else { |
|
2112 |
if (s2.is_register()) { |
|
2113 |
assert_different_registers(s2.as_register(), temp); |
|
2114 |
if (d.is_constant()) d = temp; |
|
2115 |
add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register()); |
|
2116 |
return d; |
|
2117 |
} else { |
|
2118 |
intptr_t res = s1.as_constant() + s2.as_constant(); |
|
2119 |
return res; |
|
2120 |
} |
|
2121 |
} |
|
2122 |
} |
|
2123 |
||
2124 |
RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) { |
|
2125 |
assert(d.register_or_noreg() != G0, "lost side effect"); |
|
2126 |
if (!is_simm13(s2.constant_or_zero())) |
|
2127 |
s2 = (s2.as_constant() & 0xFF); |
|
2128 |
if ((s2.is_constant() && s2.as_constant() == 0) || |
|
2129 |
(s2.is_register() && s2.as_register() == G0)) { |
|
2130 |
// Do nothing, just move value. |
|
2131 |
if (s1.is_register()) { |
|
2132 |
if (d.is_constant()) d = temp; |
|
2133 |
mov(s1.as_register(), d.as_register()); |
|
2134 |
return d; |
|
2135 |
} else { |
|
2136 |
return s1; |
|
2137 |
} |
|
2138 |
} |
|
2139 |
||
2140 |
if (s1.is_register()) { |
|
2141 |
assert_different_registers(s1.as_register(), temp); |
|
2142 |
if (d.is_constant()) d = temp; |
|
2143 |
sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register()); |
|
2144 |
return d; |
|
2145 |
} else { |
|
2146 |
if (s2.is_register()) { |
|
2147 |
assert_different_registers(s2.as_register(), temp); |
|
2148 |
if (d.is_constant()) d = temp; |
|
2149 |
set(s1.as_constant(), temp); |
|
2150 |
sll_ptr(temp, s2.as_register(), d.as_register()); |
|
2151 |
return d; |
|
2152 |
} else { |
|
2153 |
intptr_t res = s1.as_constant() << s2.as_constant(); |
|
2154 |
return res; |
|
2155 |
} |
|
2156 |
} |
|
2157 |
} |
|
2158 |
||
2159 |
||
2160 |
// Look up the method for a megamorphic invokeinterface call. |
|
2161 |
// The target method is determined by <intf_klass, itable_index>. |
|
2162 |
// The receiver klass is in recv_klass. |
|
2163 |
// On success, the result will be in method_result, and execution falls through. |
|
2164 |
// On failure, execution transfers to the given label. |
|
2165 |
void MacroAssembler::lookup_interface_method(Register recv_klass, |
|
2166 |
Register intf_klass, |
|
2167 |
RegisterOrConstant itable_index, |
|
2168 |
Register method_result, |
|
2169 |
Register scan_temp, |
|
2170 |
Register sethi_temp, |
|
2171 |
Label& L_no_such_interface) { |
|
2172 |
assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); |
|
2173 |
assert(itable_index.is_constant() || itable_index.as_register() == method_result, |
|
2174 |
"caller must use same register for non-constant itable index as for method"); |
|
2175 |
||
2176 |
Label L_no_such_interface_restore; |
|
2177 |
bool did_save = false; |
|
2178 |
if (scan_temp == noreg || sethi_temp == noreg) { |
|
2179 |
Register recv_2 = recv_klass->is_global() ? recv_klass : L0; |
|
2180 |
Register intf_2 = intf_klass->is_global() ? intf_klass : L1; |
|
2181 |
assert(method_result->is_global(), "must be able to return value"); |
|
2182 |
scan_temp = L2; |
|
2183 |
sethi_temp = L3; |
|
2184 |
save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2); |
|
2185 |
recv_klass = recv_2; |
|
2186 |
intf_klass = intf_2; |
|
2187 |
did_save = true; |
|
2188 |
} |
|
2189 |
||
2190 |
// Compute start of first itableOffsetEntry (which is at the end of the vtable) |
|
35899 | 2191 |
int vtable_base = in_bytes(Klass::vtable_start_offset()); |
14631 | 2192 |
int scan_step = itableOffsetEntry::size() * wordSize; |
2193 |
int vte_size = vtableEntry::size_in_bytes(); |
2194 |
|
35899 | 2195 |
lduw(recv_klass, in_bytes(Klass::vtable_length_offset()), scan_temp); |
14631 | 2196 |
// %%% We should store the aligned, prescaled offset in the klassoop. |
2197 |
// Then the next several instructions would fold away. |
|
2198 |
||
2199 |
int itb_offset = vtable_base; |
|
2200 |
int itb_scale = exact_log2(vtableEntry::size_in_bytes()); |
14631 | 2201 |
sll(scan_temp, itb_scale, scan_temp); |
2202 |
add(scan_temp, itb_offset, scan_temp); |
|
2203 |
add(recv_klass, scan_temp, scan_temp); |
|
2204 |
||
2205 |
// Adjust recv_klass by scaled itable_index, so we can free itable_index. |
|
2206 |
RegisterOrConstant itable_offset = itable_index; |
|
2207 |
itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset); |
|
2208 |
itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset); |
|
2209 |
add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass); |
|
2210 |
||
2211 |
// for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { |
|
2212 |
// if (scan->interface() == intf) { |
|
2213 |
// result = (klass + scan->offset() + itable_index); |
|
2214 |
// } |
|
2215 |
// } |
|
2216 |
Label L_search, L_found_method; |
|
2217 |
||
2218 |
for (int peel = 1; peel >= 0; peel--) { |
|
2219 |
// %%%% Could load both offset and interface in one ldx, if they were |
|
2220 |
// in the opposite order. This would save a load. |
|
2221 |
ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result); |
|
2222 |
||
2223 |
// Check that this entry is non-null. A null entry means that |
|
2224 |
// the receiver class doesn't implement the interface, and wasn't the |
|
2225 |
// same as when the caller was compiled. |
|
2226 |
bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface); |
|
2227 |
delayed()->cmp(method_result, intf_klass); |
|
2228 |
||
2229 |
if (peel) { |
|
2230 |
brx(Assembler::equal, false, Assembler::pt, L_found_method); |
|
2231 |
} else { |
|
2232 |
brx(Assembler::notEqual, false, Assembler::pn, L_search); |
|
2233 |
// (invert the test to fall through to found_method...) |
|
2234 |
} |
|
2235 |
delayed()->add(scan_temp, scan_step, scan_temp); |
|
2236 |
||
2237 |
if (!peel) break; |
|
2238 |
||
2239 |
bind(L_search); |
|
2240 |
} |
|
2241 |
||
2242 |
bind(L_found_method); |
|
2243 |
||
2244 |
// Got a hit. |
|
2245 |
int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); |
|
2246 |
// scan_temp[-scan_step] points to the vtable offset we need |
|
2247 |
ito_offset -= scan_step; |
|
2248 |
lduw(scan_temp, ito_offset, scan_temp); |
|
2249 |
ld_ptr(recv_klass, scan_temp, method_result); |
|
2250 |
||
2251 |
if (did_save) { |
|
2252 |
Label L_done; |
|
2253 |
ba(L_done); |
|
2254 |
delayed()->restore(); |
|
2255 |
||
2256 |
bind(L_no_such_interface_restore); |
|
2257 |
ba(L_no_such_interface); |
|
2258 |
delayed()->restore(); |
|
2259 |
||
2260 |
bind(L_done); |
|
2261 |
} |
|
2262 |
} |
|
2263 |
||
2264 |
||
2265 |
// virtual method calling |
|
2266 |
void MacroAssembler::lookup_virtual_method(Register recv_klass, |
|
2267 |
RegisterOrConstant vtable_index, |
|
2268 |
Register method_result) { |
|
2269 |
assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); |
|
2270 |
Register sethi_temp = method_result; |
|
35899 | 2271 |
const int base = in_bytes(Klass::vtable_start_offset()) + |
2272 |
// method pointer offset within the vtable entry: |
2273 |
vtableEntry::method_offset_in_bytes(); |
14631 | 2274 |
RegisterOrConstant vtable_offset = vtable_index; |
2275 |
// Each of the following three lines potentially generates an instruction. |
|
2276 |
// But the total number of address formation instructions will always be |
|
2277 |
// at most two, and will often be zero. In any case, it will be optimal. |
|
2278 |
// If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x). |
|
2279 |
// If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t). |
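// (Added note:) the address formed below is recv_klass + base + |
// vtable_index * vtableEntry::size_in_bytes(), i.e. the method slot of the selected |
// vtable entry. |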
|
2280 |
vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size_in_bytes()), vtable_offset); |
14631 | 2281 |
vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp); |
2282 |
Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp)); |
|
2283 |
ld_ptr(vtable_entry_addr, method_result); |
|
2284 |
} |
|
2285 |
||
2286 |
||
2287 |
void MacroAssembler::check_klass_subtype(Register sub_klass, |
|
2288 |
Register super_klass, |
|
2289 |
Register temp_reg, |
|
2290 |
Register temp2_reg, |
|
2291 |
Label& L_success) { |
|
2292 |
Register sub_2 = sub_klass; |
|
2293 |
Register sup_2 = super_klass; |
|
2294 |
if (!sub_2->is_global()) sub_2 = L0; |
|
2295 |
if (!sup_2->is_global()) sup_2 = L1; |
|
2296 |
bool did_save = false; |
|
2297 |
if (temp_reg == noreg || temp2_reg == noreg) { |
|
2298 |
temp_reg = L2; |
|
2299 |
temp2_reg = L3; |
|
2300 |
save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); |
|
2301 |
sub_klass = sub_2; |
|
2302 |
super_klass = sup_2; |
|
2303 |
did_save = true; |
|
2304 |
} |
|
2305 |
Label L_failure, L_pop_to_failure, L_pop_to_success; |
|
2306 |
check_klass_subtype_fast_path(sub_klass, super_klass, |
|
2307 |
temp_reg, temp2_reg, |
|
2308 |
(did_save ? &L_pop_to_success : &L_success), |
|
2309 |
(did_save ? &L_pop_to_failure : &L_failure), NULL); |
|
2310 |
||
2311 |
if (!did_save) |
|
2312 |
save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2); |
|
2313 |
check_klass_subtype_slow_path(sub_2, sup_2, |
|
2314 |
L2, L3, L4, L5, |
|
2315 |
NULL, &L_pop_to_failure); |
|
2316 |
||
2317 |
// on success: |
|
2318 |
bind(L_pop_to_success); |
|
2319 |
restore(); |
|
2320 |
ba_short(L_success); |
|
2321 |
||
2322 |
// on failure: |
|
2323 |
bind(L_pop_to_failure); |
|
2324 |
restore(); |
|
2325 |
bind(L_failure); |
|
2326 |
} |
|
2327 |
||
2328 |
||
2329 |
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, |
|
2330 |
Register super_klass, |
|
2331 |
Register temp_reg, |
|
2332 |
Register temp2_reg, |
|
2333 |
Label* L_success, |
|
2334 |
Label* L_failure, |
|
2335 |
Label* L_slow_path, |
|
2336 |
RegisterOrConstant super_check_offset) { |
|
2337 |
int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); |
|
2338 |
int sco_offset = in_bytes(Klass::super_check_offset_offset()); |
|
2339 |
||
2340 |
bool must_load_sco = (super_check_offset.constant_or_zero() == -1); |
|
2341 |
bool need_slow_path = (must_load_sco || |
|
2342 |
super_check_offset.constant_or_zero() == sco_offset); |
|
2343 |
||
2344 |
assert_different_registers(sub_klass, super_klass, temp_reg); |
|
2345 |
if (super_check_offset.is_register()) { |
|
2346 |
assert_different_registers(sub_klass, super_klass, temp_reg, |
|
2347 |
super_check_offset.as_register()); |
|
2348 |
} else if (must_load_sco) { |
|
2349 |
assert(temp2_reg != noreg, "supply either a temp or a register offset"); |
|
2350 |
} |
|
2351 |
||
2352 |
Label L_fallthrough; |
|
2353 |
int label_nulls = 0; |
|
2354 |
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } |
|
2355 |
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } |
|
2356 |
if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } |
|
2357 |
assert(label_nulls <= 1 || |
|
2358 |
(L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), |
|
2359 |
"at most one NULL in the batch, usually"); |
|
2360 |
||
2361 |
// If the pointers are equal, we are done (e.g., String[] elements). |
|
2362 |
// This self-check enables sharing of secondary supertype arrays among |
|
2363 |
// non-primary types such as array-of-interface. Otherwise, each such |
|
2364 |
// type would need its own customized SSA. |
|
2365 |
// We move this check to the front of the fast path because many |
|
2366 |
// type checks are in fact trivially successful in this manner, |
|
2367 |
// so we get a nicely predicted branch right at the start of the check. |
|
2368 |
cmp(super_klass, sub_klass); |
|
2369 |
brx(Assembler::equal, false, Assembler::pn, *L_success); |
|
2370 |
delayed()->nop(); |
|
2371 |
||
2372 |
// Check the supertype display: |
|
2373 |
if (must_load_sco) { |
|
2374 |
// The super check offset is always positive... |
|
2375 |
lduw(super_klass, sco_offset, temp2_reg); |
|
2376 |
super_check_offset = RegisterOrConstant(temp2_reg); |
|
2377 |
// super_check_offset is register. |
|
2378 |
assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register()); |
|
2379 |
} |
|
2380 |
ld_ptr(sub_klass, super_check_offset, temp_reg); |
|
2381 |
cmp(super_klass, temp_reg); |
|
2382 |
||
2383 |
// This check has worked decisively for primary supers. |
|
2384 |
// Secondary supers are sought in the super_cache ('super_cache_addr'). |
|
2385 |
// (Secondary supers are interfaces and very deeply nested subtypes.) |
|
2386 |
// This works in the same check above because of a tricky aliasing |
|
2387 |
// between the super_cache and the primary super display elements. |
|
2388 |
// (The 'super_check_addr' can address either, as the case requires.) |
|
2389 |
// Note that the cache is updated below if it does not help us find |
|
2390 |
// what we need immediately. |
|
2391 |
// So if it was a primary super, we can just fail immediately. |
|
2392 |
// Otherwise, it's the slow path for us (no success at this point). |
|
2393 |
||
2394 |
// Hacked ba(), which may only be used just before L_fallthrough. |
|
2395 |
#define FINAL_JUMP(label) \ |
|
2396 |
if (&(label) != &L_fallthrough) { \ |
|
2397 |
ba(label); delayed()->nop(); \ |
|
2398 |
} |
|
2399 |
||
2400 |
if (super_check_offset.is_register()) { |
|
2401 |
brx(Assembler::equal, false, Assembler::pn, *L_success); |
|
2402 |
delayed()->cmp(super_check_offset.as_register(), sc_offset); |
|
2403 |
||
2404 |
if (L_failure == &L_fallthrough) { |
|
2405 |
brx(Assembler::equal, false, Assembler::pt, *L_slow_path); |
|
2406 |
delayed()->nop(); |
|
2407 |
} else { |
|
2408 |
brx(Assembler::notEqual, false, Assembler::pn, *L_failure); |
|
2409 |
delayed()->nop(); |
|
2410 |
FINAL_JUMP(*L_slow_path); |
|
2411 |
} |
|
2412 |
} else if (super_check_offset.as_constant() == sc_offset) { |
|
2413 |
// Need a slow path; fast failure is impossible. |
|
2414 |
if (L_slow_path == &L_fallthrough) { |
|
2415 |
brx(Assembler::equal, false, Assembler::pt, *L_success); |
|
2416 |
delayed()->nop(); |
|
2417 |
} else { |
|
2418 |
brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path); |
|
2419 |
delayed()->nop(); |
|
2420 |
FINAL_JUMP(*L_success); |
|
2421 |
} |
|
2422 |
} else { |
|
2423 |
// No slow path; it's a fast decision. |
|
2424 |
if (L_failure == &L_fallthrough) { |
|
2425 |
brx(Assembler::equal, false, Assembler::pt, *L_success); |
|
2426 |
delayed()->nop(); |
|
2427 |
} else { |
|
2428 |
brx(Assembler::notEqual, false, Assembler::pn, *L_failure); |
|
2429 |
delayed()->nop(); |
|
2430 |
FINAL_JUMP(*L_success); |
|
2431 |
} |
|
2432 |
} |
|
2433 |
||
2434 |
bind(L_fallthrough); |
|
2435 |
||
2436 |
#undef FINAL_JUMP |
|
2437 |
} |
|
2438 |
||
2439 |
||
2440 |
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, |
|
2441 |
Register super_klass, |
|
2442 |
Register count_temp, |
|
2443 |
Register scan_temp, |
|
2444 |
Register scratch_reg, |
|
2445 |
Register coop_reg, |
|
2446 |
Label* L_success, |
|
2447 |
Label* L_failure) { |
|
2448 |
assert_different_registers(sub_klass, super_klass, |
|
2449 |
count_temp, scan_temp, scratch_reg, coop_reg); |
|
2450 |
||
2451 |
Label L_fallthrough, L_loop; |
|
2452 |
int label_nulls = 0; |
|
2453 |
if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } |
|
2454 |
if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } |
|
2455 |
assert(label_nulls <= 1, "at most one NULL in the batch"); |
|
2456 |
||
2457 |
// a couple of useful fields in sub_klass: |
|
2458 |
int ss_offset = in_bytes(Klass::secondary_supers_offset()); |
|
2459 |
int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); |
|
2460 |
||
2461 |
// Do a linear scan of the secondary super-klass chain. |
|
2462 |
// This code is rarely used, so simplicity is a virtue here. |
|
2463 |
||
2464 |
#ifndef PRODUCT |
|
2465 |
int* pst_counter = &SharedRuntime::_partial_subtype_ctr; |
|
2466 |
inc_counter((address) pst_counter, count_temp, scan_temp); |
|
2467 |
#endif |
|
2468 |
||
2469 |
// We will consult the secondary-super array. |
|
2470 |
ld_ptr(sub_klass, ss_offset, scan_temp); |
|
2471 |
||
2472 |
Register search_key = super_klass; |
|
2473 |
||
2474 |
// Load the array length. (Positive movl does right thing on LP64.) |
|
2475 |
lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp); |
|
2476 |
||
2477 |
// Check for empty secondary super list |
|
2478 |
tst(count_temp); |
|
2479 |
||
2480 |
// In the array of super classes elements are pointer sized. |
|
2481 |
int element_size = wordSize; |
|
2482 |
||
2483 |
// Top of search loop |
|
2484 |
bind(L_loop); |
|
2485 |
br(Assembler::equal, false, Assembler::pn, *L_failure); |
|
2486 |
delayed()->add(scan_temp, element_size, scan_temp); |
|
2487 |
||
2488 |
// Skip the array header in all array accesses. |
|
2489 |
int elem_offset = Array<Klass*>::base_offset_in_bytes(); |
|
2490 |
elem_offset -= element_size; // the scan pointer was pre-incremented also |
|
2491 |
||
2492 |
// Load next super to check |
|
2493 |
ld_ptr( scan_temp, elem_offset, scratch_reg ); |
|
2494 |
||
2495 |
// Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list |
|
2496 |
cmp(scratch_reg, search_key); |
|
2497 |
||
2498 |
// A miss means we are NOT a subtype and need to keep looping |
|
2499 |
brx(Assembler::notEqual, false, Assembler::pn, L_loop); |
|
2500 |
delayed()->deccc(count_temp); // decrement trip counter in delay slot |
|
2501 |
||
2502 |
// Success. Cache the super we found and proceed in triumph. |
|
2503 |
st_ptr(super_klass, sub_klass, sc_offset); |
|
2504 |
||
2505 |
if (L_success != &L_fallthrough) { |
|
2506 |
ba(*L_success); |
|
2507 |
delayed()->nop(); |
|
2508 |
} |
|
2509 |
||
2510 |
bind(L_fallthrough); |
|
2511 |
} |
|
2512 |
||
2513 |
||
2514 |
RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, |
|
2515 |
Register temp_reg, |
|
2516 |
int extra_slot_offset) { |
|
2517 |
// cf. TemplateTable::prepare_invoke(), if (load_receiver). |
|
2518 |
int stackElementSize = Interpreter::stackElementSize; |
|
2519 |
int offset = extra_slot_offset * stackElementSize; |
|
2520 |
if (arg_slot.is_constant()) { |
|
2521 |
offset += arg_slot.as_constant() * stackElementSize; |
|
2522 |
return offset; |
|
2523 |
} else { |
|
2524 |
assert(temp_reg != noreg, "must specify"); |
|
2525 |
sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg); |
|
2526 |
if (offset != 0) |
|
2527 |
add(temp_reg, offset, temp_reg); |
|
2528 |
return temp_reg; |
|
2529 |
} |
|
2530 |
} |
|
2531 |
||
2532 |
||
2533 |
Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, |
|
2534 |
Register temp_reg, |
|
2535 |
int extra_slot_offset) { |
|
2536 |
return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset)); |
|
2537 |
} |
|
2538 |
||
2539 |
||
2540 |
void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, |
|
2541 |
Register temp_reg, |
|
2542 |
Label& done, Label* slow_case, |
|
2543 |
BiasedLockingCounters* counters) { |
|
2544 |
assert(UseBiasedLocking, "why call this otherwise?"); |
|
2545 |
||
2546 |
if (PrintBiasedLockingStatistics) { |
|
2547 |
assert_different_registers(obj_reg, mark_reg, temp_reg, O7); |
|
2548 |
if (counters == NULL) |
|
2549 |
counters = BiasedLocking::counters(); |
|
2550 |
} |
|
2551 |
||
2552 |
Label cas_label; |
|
2553 |
||
2554 |
// Biased locking |
|
2555 |
// See whether the lock is currently biased toward our thread and |
|
2556 |
// whether the epoch is still valid |
|
2557 |
// Note that the runtime guarantees sufficient alignment of JavaThread |
|
2558 |
// pointers to allow age to be placed into low bits |
|
2559 |
assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); |
|
2560 |
and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); |
|
2561 |
cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); |
|
2562 |
||
2563 |
load_klass(obj_reg, temp_reg); |
|
2564 |
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); |
|
2565 |
or3(G2_thread, temp_reg, temp_reg); |
|
2566 |
xor3(mark_reg, temp_reg, temp_reg); |
|
2567 |
andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg); |
|
2568 |
if (counters != NULL) { |
|
2569 |
cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg); |
|
2570 |
// Reload mark_reg as we may need it later |
|
2571 |
ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg); |
|
2572 |
} |
|
2573 |
brx(Assembler::equal, true, Assembler::pt, done); |
|
2574 |
delayed()->nop(); |
|
2575 |
||
2576 |
Label try_revoke_bias; |
|
2577 |
Label try_rebias; |
|
2578 |
Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); |
|
2579 |
assert(mark_addr.disp() == 0, "cas must take a zero displacement"); |
|
2580 |
||
2581 |
// At this point we know that the header has the bias pattern and |
|
2582 |
// that we are not the bias owner in the current epoch. We need to |
|
2583 |
// figure out more details about the state of the header in order to |
|
2584 |
// know what operations can be legally performed on the object's |
|
2585 |
// header. |
|
2586 |
||
2587 |
// If the low three bits in the xor result aren't clear, that means |
|
2588 |
// the prototype header is no longer biased and we have to revoke |
|
2589 |
// the bias on this object. |
|
2590 |
btst(markOopDesc::biased_lock_mask_in_place, temp_reg); |
|
2591 |
brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias); |
|
2592 |
||
2593 |
// Biasing is still enabled for this data type. See whether the |
|
2594 |
// epoch of the current bias is still valid, meaning that the epoch |
|
2595 |
// bits of the mark word are equal to the epoch bits of the |
|
2596 |
// prototype header. (Note that the prototype header's epoch bits |
|
2597 |
// only change at a safepoint.) If not, attempt to rebias the object |
|
2598 |
// toward the current thread. Note that we must be absolutely sure |
|
2599 |
// that the current epoch is invalid in order to do this because |
|
2600 |
// otherwise the manipulations it performs on the mark word are |
|
2601 |
// illegal. |
|
2602 |
delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg); |
|
2603 |
brx(Assembler::notZero, false, Assembler::pn, try_rebias); |
|
2604 |
||
2605 |
// The epoch of the current bias is still valid but we know nothing |
|
2606 |
// about the owner; it might be set or it might be clear. Try to |
|
2607 |
// acquire the bias of the object using an atomic operation. If this |
|
2608 |
// fails we will go in to the runtime to revoke the object's bias. |
|
2609 |
// Note that we first construct the presumed unbiased header so we |
|
2610 |
// don't accidentally blow away another thread's valid bias. |
|
2611 |
delayed()->and3(mark_reg, |
|
2612 |
markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, |
|
2613 |
mark_reg); |
|
2614 |
or3(G2_thread, mark_reg, temp_reg); |
|
18097 | 2615 |
cas_ptr(mark_addr.base(), mark_reg, temp_reg); |
14631 | 2616 |
// If the biasing toward our thread failed, this means that |
2617 |
// another thread succeeded in biasing it toward itself and we |
|
2618 |
// need to revoke that bias. The revocation will occur in the |
|
2619 |
// interpreter runtime in the slow case. |
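// (Added note:) at this point mark_reg still holds the presumed unbiased header (bias |
// pattern, age and epoch only) that the cas_ptr above used as its compare value, while |
// temp_reg holds the word actually found in memory, so equality means we now own the bias. |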
|
2620 |
cmp(mark_reg, temp_reg); |
|
2621 |
if (counters != NULL) { |
|
2622 |
cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg); |
|
2623 |
} |
|
2624 |
if (slow_case != NULL) { |
|
2625 |
brx(Assembler::notEqual, true, Assembler::pn, *slow_case); |
|
2626 |
delayed()->nop(); |
|
2627 |
} |
|
2628 |
ba_short(done); |
|
2629 |
||
2630 |
bind(try_rebias); |
|
2631 |
// At this point we know the epoch has expired, meaning that the |
|
2632 |
// current "bias owner", if any, is actually invalid. Under these |
|
2633 |
// circumstances _only_, we are allowed to use the current header's |
|
2634 |
// value as the comparison value when doing the cas to acquire the |
|
2635 |
// bias in the current epoch. In other words, we allow transfer of |
|
2636 |
// the bias from one thread to another directly in this situation. |
|
2637 |
// |
|
2638 |
// FIXME: due to a lack of registers we currently blow away the age |
|
2639 |
// bits in this situation. Should attempt to preserve them. |
|
2640 |
load_klass(obj_reg, temp_reg); |
|
2641 |
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); |
|
2642 |
or3(G2_thread, temp_reg, temp_reg); |
|
18097 | 2643 |
cas_ptr(mark_addr.base(), mark_reg, temp_reg); |
14631 | 2644 |
// If the biasing toward our thread failed, this means that |
2645 |
// another thread succeeded in biasing it toward itself and we |
|
2646 |
// need to revoke that bias. The revocation will occur in the |
|
2647 |
// interpreter runtime in the slow case. |
|
2648 |
cmp(mark_reg, temp_reg); |
|
2649 |
if (counters != NULL) { |
|
2650 |
cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg); |
|
2651 |
} |
|
2652 |
if (slow_case != NULL) { |
|
2653 |
brx(Assembler::notEqual, true, Assembler::pn, *slow_case); |
|
2654 |
delayed()->nop(); |
|
2655 |
} |
|
2656 |
ba_short(done); |
|
2657 |
||
2658 |
bind(try_revoke_bias); |
|
2659 |
// The prototype mark in the klass doesn't have the bias bit set any |
|
2660 |
// more, indicating that objects of this data type are not supposed |
|
2661 |
// to be biased any more. We are going to try to reset the mark of |
|
2662 |
// this object to the prototype value and fall through to the |
|
2663 |
// CAS-based locking scheme. Note that if our CAS fails, it means |
|
2664 |
// that another thread raced us for the privilege of revoking the |
|
2665 |
// bias of this particular object, so it's okay to continue in the |
|
2666 |
// normal locking code. |
|
2667 |
// |
|
2668 |
// FIXME: due to a lack of registers we currently blow away the age |
|
2669 |
// bits in this situation. Should attempt to preserve them. |
|
2670 |
load_klass(obj_reg, temp_reg); |
|
2671 |
ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); |
|
18097 | 2672 |
cas_ptr(mark_addr.base(), mark_reg, temp_reg); |
14631 | 2673 |
// Fall through to the normal CAS-based lock, because no matter what |
2674 |
// the result of the above CAS, some thread must have succeeded in |
|
2675 |
// removing the bias bit from the object's header. |
|
2676 |
if (counters != NULL) { |
|
2677 |
cmp(mark_reg, temp_reg); |
|
2678 |
cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg); |
|
2679 |
} |
|
2680 |
||
2681 |
bind(cas_label); |
|
2682 |
} |
|
2683 |
||
2684 |
void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done, |
|
2685 |
bool allow_delay_slot_filling) { |
|
2686 |
// Check for biased locking unlock case, which is a no-op |
|
2687 |
// Note: we do not have to check the thread ID for two reasons. |
|
2688 |
// First, the interpreter checks for IllegalMonitorStateException at |
|
2689 |
// a higher level. Second, if the bias was revoked while we held the |
|
2690 |
// lock, the object could not be rebiased toward another thread, so |
|
2691 |
// the bias bit would be clear. |
|
2692 |
ld_ptr(mark_addr, temp_reg); |
|
2693 |
and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); |
|
2694 |
cmp(temp_reg, markOopDesc::biased_lock_pattern); |
|
2695 |
brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done); |
|
2696 |
delayed(); |
|
2697 |
if (!allow_delay_slot_filling) { |
|
2698 |
nop(); |
|
2699 |
} |
|
2700 |
} |
|
2701 |
||
2702 |
||
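// Illustrative sketch only - not emitted code and not a VM API. Assuming the
// markOop bit layout used above, the bias enter/exit paths reduce to roughly
// this C-like pseudocode (field and helper names here are descriptive only):
//
//   // enter (anonymous bias, rebias and revoke all funnel through a CAS):
//   unbiased = mark & (biased_lock_mask | age_mask | epoch_mask);
//   if (!CAS(&obj->mark, unbiased, unbiased | thread)) goto slow_case;
//
//   // exit: unlocking an object that is still biased to us is a no-op
//   if ((obj->mark & biased_lock_mask) == biased_lock_pattern) return;
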
// compiler_lock_object() and compiler_unlock_object() are direct transliterations
// of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
// The code could be tightened up considerably.
//
// box->dhw disposition - post-conditions at DONE_LABEL.
// -   Successful inflated lock:  box->dhw != 0.
//     Any non-zero value suffices.
//     Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark()
// -   Successful Stack-lock: box->dhw == mark.
//     box->dhw must contain the displaced mark word value
// -   Failure -- icc.ZFlag == 0 and box->dhw is undefined.
//     The slow-path fast_enter() and slow_enter() operators
//     are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
// -   Biased: box->dhw is undefined
//
// SPARC refworkload performance - specifically jetstream and scimark - is
// extremely sensitive to the size of the code emitted by compiler_lock_object
// and compiler_unlock_object.  Critically, the key factor is code size, not path
// length.  (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
// effect).

void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
                                          Register Rbox, Register Rscratch,
                                          BiasedLockingCounters* counters,
                                          bool try_bias) {
  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

  verify_oop(Roop);
  Label done;

  if (counters != NULL) {
    inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
  }

  if (EmitSync & 1) {
    mov(3, Rscratch);
    st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    cmp(SP, G0);
    return;
  }

  if (EmitSync & 2) {

    // Fetch object's markword
    ld_ptr(mark_addr, Rmark);

    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
    }

    // Save Rbox in Rscratch to be used for the cas operation
    mov(Rbox, Rscratch);

    // set Rmark to markOop | markOopDesc::unlocked_value
    or3(Rmark, markOopDesc::unlocked_value, Rmark);

    // Initialize the box.  (Must happen before we update the object mark!)
    st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());

    // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);

    // if compare/exchange succeeded we found an unlocked object and we now have locked it
    // hence we are done
    cmp(Rmark, Rscratch);
#ifdef _LP64
    sub(Rscratch, STACK_BIAS, Rscratch);
#endif
    brx(Assembler::equal, false, Assembler::pt, done);
    delayed()->sub(Rscratch, SP, Rscratch);  // pull next instruction into delay slot

    // we did not find an unlocked object so see if this is a recursive case
    // sub(Rscratch, SP, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    bind(done);
    return;
  }

  Label Egress;

  if (EmitSync & 256) {
    Label IsInflated;

    ld_ptr(mark_addr, Rmark);           // fetch obj->mark
    // Triage: biased, stack-locked, neutral, inflated
    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
      // Invariant: if control reaches this point in the emitted stream
      // then Rmark has not been modified.
    }

    // Store mark into displaced mark field in the on-stack basic-lock "box"
    // Critically, this must happen before the CAS
    // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
    st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
    andcc(Rmark, 2, G0);
    brx(Assembler::notZero, false, Assembler::pn, IsInflated);
    delayed()->

    // Try stack-lock acquisition.
    // Beware: the 1st instruction is in a delay slot
    mov(Rbox, Rscratch);
    or3(Rmark, markOopDesc::unlocked_value, Rmark);
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);
    cmp(Rmark, Rscratch);
    brx(Assembler::equal, false, Assembler::pt, done);
    delayed()->sub(Rscratch, SP, Rscratch);

    // Stack-lock attempt failed - check for recursive stack-lock.
    // See the comments below about how we might remove this case.
#ifdef _LP64
    sub(Rscratch, STACK_BIAS, Rscratch);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());

    bind(IsInflated);
    if (EmitSync & 64) {
      // If m->owner != null goto IsLocked
      // Pessimistic form: Test-and-CAS vs CAS
      // The optimistic form avoids RTS->RTO cache line upgrades.
      ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
      andcc(Rscratch, Rscratch, G0);
      brx(Assembler::notZero, false, Assembler::pn, done);
      delayed()->nop();
      // m->owner == null : it's unlocked.
    }

    // Try to CAS m->owner from null to Self
    // Invariant: if we acquire the lock then _recursions should be 0.
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // Intentional fall-through into done
  } else {
    // Aggressively avoid the Store-before-CAS penalty
    // Defer the store into box->dhw until after the CAS
    Label IsInflated, Recursive;

    // Anticipate CAS -- Avoid RTS->RTO upgrade
    // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);

    ld_ptr(mark_addr, Rmark);           // fetch obj->mark
    // Triage: biased, stack-locked, neutral, inflated

    if (try_bias) {
      biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
      // Invariant: if control reaches this point in the emitted stream
      // then Rmark has not been modified.
    }
    andcc(Rmark, 2, G0);
    brx(Assembler::notZero, false, Assembler::pn, IsInflated);
    delayed()->                         // Beware - dangling delay-slot

    // Try stack-lock acquisition.
    // Transiently install BUSY (0) encoding in the mark word.
    // if the CAS of 0 into the mark was successful then we execute:
    //   ST box->dhw  = mark -- save fetched mark in on-stack basiclock box
    //   ST obj->mark = box  -- overwrite transient 0 value
    // This presumes TSO, of course.

    mov(0, Rscratch);
    or3(Rmark, markOopDesc::unlocked_value, Rmark);
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);
    // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
    cmp(Rscratch, Rmark);
    brx(Assembler::notZero, false, Assembler::pn, Recursive);
    delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
    if (counters != NULL) {
      cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
    }
    ba(done);
    delayed()->st_ptr(Rbox, mark_addr);

    bind(Recursive);
    // Stack-lock attempt failed - check for recursive stack-lock.
    // Tests show that we can remove the recursive case with no impact
    // on refworkload 0.83.  If we need to reduce the size of the code
    // emitted by compiler_lock_object() the recursive case is a perfect
    // candidate.
    //
    // A more extreme idea is to always inflate on stack-lock recursion.
    // This lets us eliminate the recursive checks in compiler_lock_object
    // and compiler_unlock_object and the (box->dhw == 0) encoding.
    // A brief experiment - requiring changes to synchronizer.cpp and the
    // interpreter - showed a performance *increase*.  In the same experiment I
    // eliminated the fast-path stack-lock code from the interpreter and always
    // passed control to the "slow" operators in synchronizer.cpp.

    // RScratch contains the fetched obj->mark value from the failed CAS.
#ifdef _LP64
    sub(Rscratch, STACK_BIAS, Rscratch);
#endif
    sub(Rscratch, SP, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    if (counters != NULL) {
      // Accounting needs the Rscratch register
      st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
      cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
      ba_short(done);
    } else {
      ba(done);
      delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    }

    bind(IsInflated);

    // Try to CAS m->owner from null to Self
    // Invariant: if we acquire the lock then _recursions should be 0.
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    andcc(Rscratch, Rscratch, G0);      // set ICCs for done: icc.zf iff success
    // set icc.zf : 1=success 0=failure
    // ST box->displaced_header = NonZero.
    // Any non-zero value suffices:
    //    markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
    st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
    // Intentional fall-through into done
  }

  bind(done);
}

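// Illustrative sketch only - a C-like outline of the fast-lock triage that
// compiler_lock_object() emits above (names are descriptive, not VM APIs):
//
//   mark = obj->mark;
//   if (biasable)      { try to take or keep the bias, else go to runtime; }
//   if (inflated)      { CAS(&monitor->owner, NULL, Self); box->dhw = non-zero; }
//   else /* neutral */ {
//     box->dhw = mark | unlocked;                         // displaced header
//     if (CAS(&obj->mark, mark | unlocked, 0)) obj->mark = box;        // owned
//     else if (((old_mark - SP) & ~0xfff) == 0) box->dhw = 0;      // recursive
//     else go to runtime;
//   }
//   // success iff icc.zf is set at 'done'
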
void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
                                            Register Rbox, Register Rscratch,
                                            bool try_bias) {
  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

  Label done;

  if (EmitSync & 4) {
    cmp(SP, G0);
    return;
  }

  if (EmitSync & 8) {
    if (try_bias) {
      biased_locking_exit(mark_addr, Rscratch, done);
    }

    // Test first if it is a fast recursive unlock
    ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
    br_null_short(Rmark, Assembler::pt, done);

    // Check if it is still a lightweight lock, this is true if we see
    // the stack address of the basicLock in the markOop of the object
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rbox, Rmark);
    ba(done);
    delayed()->cmp(Rbox, Rmark);
    bind(done);
    return;
  }

  // Beware ... If the aggregate size of the code emitted by CLO and CUO
  // is too large performance rolls abruptly off a cliff.
  // This could be related to inlining policies, code cache management, or
  // I$ effects.
  Label LStacked;

  if (try_bias) {
    // TODO: eliminate redundant LDs of obj->mark
    biased_locking_exit(mark_addr, Rscratch, done);
  }

  ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
  ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
  andcc(Rscratch, Rscratch, G0);
  brx(Assembler::zero, false, Assembler::pn, done);
  delayed()->nop();      // consider: relocate fetch of mark, above, into this DS
  andcc(Rmark, 2, G0);
  brx(Assembler::zero, false, Assembler::pt, LStacked);
  delayed()->nop();

  // It's inflated
  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
  // the ST of 0 into _owner which releases the lock.  This prevents loads
  // and stores within the critical section from reordering (floating)
  // past the store that releases the lock.  But TSO is a strong memory model
  // and that particular flavor of barrier is a noop, so we can safely elide it.
  // Note that we use 1-0 locking by default for the inflated case.  We
  // close the resultant (and rare) race by having contended threads in
  // monitorenter periodically poll _owner.

  if (EmitSync & 1024) {
    // Emit code to check that _owner == Self
    // We could fold the _owner test into subsequent code more efficiently
    // than using a stand-alone check, but since _owner checking is off by
    // default we don't bother. We also might consider predicating the
    // _owner==Self check on Xcheck:jni or running on a debug build.
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), Rscratch);
    orcc(Rscratch, G0, G0);
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->nop();
  }

  if (EmitSync & 512) {
    // classic lock release code absent 1-0 locking
    //   m->Owner = null;
    //   membar #storeload
    //   if (m->cxq|m->EntryList) == null goto Success
    //   if (m->succ != null) goto Success
    //   if CAS (&m->Owner,0,Self) != 0 goto Success
    //   goto SlowPath
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
    orcc(Rbox, G0, G0);
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->nop();
    st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    if (os::is_MP()) { membar(StoreLoad); }
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
    orcc(Rbox, Rscratch, G0);
    brx(Assembler::zero, false, Assembler::pt, done);
    delayed()->
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
    andcc(Rscratch, Rscratch, G0);
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->andcc(G0, G0, G0);
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // invert icc.zf and goto done
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->cmp(G0, G0);
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->cmp(G0, 1);
  } else {
    // 1-0 form : avoids CAS and MEMBAR in the common case
    // Do not bother to ratify that m->Owner == Self.
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), Rbox);
    orcc(Rbox, G0, G0);
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)), Rscratch);
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)), Rbox);
    orcc(Rbox, Rscratch, G0);
    if (EmitSync & 16384) {
      // As an optional optimization, if (EntryList|cxq) != null and _succ is null then
      // we should transfer control directly to the slow-path.
      // This test makes the reacquire operation below very infrequent.
      // The logic is equivalent to :
      //   if (cxq|EntryList) == null : Owner=null; goto Success
      //   if succ == null : goto SlowPath
      //   Owner=null; membar #storeload
      //   if succ != null : goto Success
      //   if CAS(&Owner,null,Self) != null goto Success
      //   goto SlowPath
      brx(Assembler::zero, true, Assembler::pt, done);
      delayed()->
      st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
      ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
      andcc(Rscratch, Rscratch, G0);
      brx(Assembler::zero, false, Assembler::pt, done);
      delayed()->orcc(G0, 1, G0);
      st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    } else {
      brx(Assembler::zero, false, Assembler::pt, done);
      delayed()->
      st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    }
    if (os::is_MP()) { membar(StoreLoad); }
    // Check that _succ is (or remains) non-zero
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
    andcc(Rscratch, Rscratch, G0);
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->andcc(G0, G0, G0);
    add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
    mov(G2_thread, Rscratch);
    cas_ptr(Rmark, G0, Rscratch);
    cmp(Rscratch, G0);
    // invert icc.zf and goto done
    // A slightly better v8+/v9 idiom would be the following:
    //   movrnz Rscratch,1,Rscratch
    //   ba done
    //   xorcc Rscratch,1,G0
    // In v8+ mode the idiom would be valid IFF Rscratch was a G or O register
    brx(Assembler::notZero, false, Assembler::pt, done);
    delayed()->cmp(G0, G0);
    br(Assembler::always, false, Assembler::pt, done);
    delayed()->cmp(G0, 1);
  }

  bind(LStacked);
  // Consider: we could replace the expensive CAS in the exit
  // path with a simple ST of the displaced mark value fetched from
  // the on-stack basiclock box.  That admits a race where a thread T2
  // in the slow lock path -- inflating with monitor M -- could race a
  // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
  // More precisely T1 in the stack-lock unlock path could "stomp" the
  // inflated mark value M installed by T2, resulting in an orphan
  // object monitor M and T2 becoming stranded.   We can remedy that situation
  // by having T2 periodically poll the object's mark word using timed wait
  // operations.  If T2 discovers that a stomp has occurred it vacates
  // the monitor M and wakes any other threads stranded on the now-orphan M.
  // In addition the monitor scavenger, which performs deflation,
  // would also need to check for orphan monitors and stranded threads.
  //
  // Finally, inflation is also used when T2 needs to assign a hashCode
  // to O and O is stack-locked by T1.  The "stomp" race could cause
  // an assigned hashCode value to be lost.  We can avoid that condition
  // and provide the necessary hashCode stability invariants by ensuring
  // that hashCode generation is idempotent between copying GCs.
  // For example we could compute the hashCode of an object O as
  // O's heap address XOR some high quality RNG value that is refreshed
  // at GC-time.  The monitor scavenger would install the hashCode
  // found in any orphan monitors.  Again, the mechanism admits a
  // lost-update "stomp" WAW race but detects and recovers as needed.
  //
  // A prototype implementation showed excellent results, although
  // the scavenger and timeout code was rather involved.

  cas_ptr(mark_addr.base(), Rbox, Rscratch);
  cmp(Rbox, Rscratch);
  // Intentional fall through into done ...

  bind(done);
}

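// Illustrative sketch only - the stack-locked exit handled at LStacked above,
// as C-like pseudocode (descriptive names, not VM APIs):
//
//   dhw = box->displaced_header;               // mark word saved at lock time
//   if (dhw == 0)                  return success;    // recursive enter: no-op
//   if (CAS(&obj->mark, box, dhw)) return success;    // restore the old header
//   goto slow_path;          // header changed concurrently (e.g. was inflated)
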

void MacroAssembler::print_CPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  // %%%%% need to implement this
}

void MacroAssembler::push_IU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::pop_IU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::push_FPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::pop_FPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::push_CPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::pop_CPU_state() {
  // %%%%% need to implement this
}

void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, next2, ok;
    Register t1 = L0;
    Register t2 = L1;
    Register t3 = L2;

    save_frame(0);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
    or3(t1, t2, t3);
    cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
    ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
    or3(t3, t2, t3);
    cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(next2);
    and3(t3, MinObjAlignmentInBytesMask, t3);
    cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok);
    STOP("assert(aligned)");
    should_not_reach_here();

    bind(ok);
    restore();
  }
#endif
}

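// Illustrative sketch only - the TLAB invariants verify_tlab() checks above,
// written out as plain C-like assertions (descriptive names only):
//
//   assert(tlab_top >= tlab_start);
//   assert(tlab_top <= tlab_end);
//   assert(((tlab_top | tlab_start | tlab_end) & MinObjAlignmentInBytesMask) == 0);
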
void MacroAssembler::eden_allocate(
  Register obj,                      // result: pointer to object after successful allocation
  Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,        // object size in bytes if   known at compile time
  Register t1,                       // temp register
  Register t2,                       // temp register
  Label&   slow_case                 // continuation point if fast allocation fails
){
  // make sure arguments make sense
  assert_different_registers(obj, var_size_in_bytes, t1, t2);
  assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");

  if (!Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    ba(slow_case);
    delayed()->nop();
  } else {
    // get eden boundaries
    // note: we need both top & top_addr!
    const Register top_addr = t1;
    const Register end      = t2;

    CollectedHeap* ch = Universe::heap();
    set((intx)ch->top_addr(), top_addr);
    intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
    ld_ptr(top_addr, delta, end);
    ld_ptr(top_addr, 0, obj);

    // try to allocate
    Label retry;
    bind(retry);
#ifdef ASSERT
    // make sure eden top is properly aligned
    {
      Label L;
      btst(MinObjAlignmentInBytesMask, obj);
      br(Assembler::zero, false, Assembler::pt, L);
      delayed()->nop();
      STOP("eden top is not properly aligned");
      bind(L);
    }
#endif // ASSERT
    const Register free = end;
    sub(end, obj, free);                                   // compute amount of free space
    if (var_size_in_bytes->is_valid()) {
      // size is unknown at compile time
      cmp(free, var_size_in_bytes);
      brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
      delayed()->add(obj, var_size_in_bytes, end);
    } else {
      // size is known at compile time
      cmp(free, con_size_in_bytes);
      brx(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
      delayed()->add(obj, con_size_in_bytes, end);
    }
    // Compare obj with the value at top_addr; if still equal, swap the value of
    // end with the value at top_addr. If not equal, read the value at top_addr
    // into end.
    cas_ptr(top_addr, obj, end);
    // if someone beat us on the allocation, try again, otherwise continue
    cmp(obj, end);
    brx(Assembler::notEqual, false, Assembler::pn, retry);
    delayed()->mov(end, obj); // nop if successful since obj == end

#ifdef ASSERT
    // make sure eden top is properly aligned
    {
      Label L;
      const Register top_addr = t1;

      set((intx)ch->top_addr(), top_addr);
      ld_ptr(top_addr, 0, top_addr);
      btst(MinObjAlignmentInBytesMask, top_addr);
      br(Assembler::zero, false, Assembler::pt, L);
      delayed()->nop();
      STOP("eden top is not properly aligned");
      bind(L);
    }
#endif // ASSERT
  }
}

||
3296 |
void MacroAssembler::tlab_allocate( |
|
3297 |
Register obj, // result: pointer to object after successful allocation |
|
3298 |
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise |
|
3299 |
int con_size_in_bytes, // object size in bytes if known at compile time |
|
3300 |
Register t1, // temp register |
|
3301 |
Label& slow_case // continuation point if fast allocation fails |
|
3302 |
){ |
|
3303 |
// make sure arguments make sense |
|
3304 |
assert_different_registers(obj, var_size_in_bytes, t1); |
|
3305 |
assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); |
|
3306 |
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); |
|
3307 |
||
3308 |
const Register free = t1; |
|
3309 |
||
3310 |
verify_tlab(); |
|
3311 |
||
3312 |
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); |
|
3313 |
||
3314 |
// calculate amount of free space |
|
3315 |
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); |
|
3316 |
sub(free, obj, free); |
|
3317 |
||
3318 |
Label done; |
|
3319 |
if (var_size_in_bytes == noreg) { |
|
3320 |
cmp(free, con_size_in_bytes); |
|
3321 |
} else { |
|
3322 |
cmp(free, var_size_in_bytes); |
|
3323 |
} |
|
3324 |
br(Assembler::less, false, Assembler::pn, slow_case); |
|
3325 |
// calculate the new top pointer |
|
3326 |
if (var_size_in_bytes == noreg) { |
|
3327 |
delayed()->add(obj, con_size_in_bytes, free); |
|
3328 |
} else { |
|
3329 |
delayed()->add(obj, var_size_in_bytes, free); |
|
3330 |
} |
|
3331 |
||
3332 |
bind(done); |
|
3333 |
||
3334 |
#ifdef ASSERT |
|
3335 |
// make sure new free pointer is properly aligned |
|
3336 |
{ |
|
3337 |
Label L; |
|
3338 |
btst(MinObjAlignmentInBytesMask, free); |
|
3339 |
br(Assembler::zero, false, Assembler::pt, L); |
|
3340 |
delayed()->nop(); |
|
3341 |
STOP("updated TLAB free is not properly aligned"); |
|
3342 |
bind(L); |
|
3343 |
} |
|
3344 |
#endif // ASSERT |
|
3345 |
||
3346 |
// update the tlab top pointer |
|
3347 |
st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset())); |
|
3348 |
verify_tlab(); |
|
3349 |
} |
|
3350 |
||
3351 |
||
3352 |
void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) { |
|
3353 |
Register top = O0; |
|
3354 |
Register t1 = G1; |
|
3355 |
Register t2 = G3; |
|
3356 |
Register t3 = O1; |
|
3357 |
assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */); |
|
3358 |
Label do_refill, discard_tlab; |
|
3359 |
||
27625 | 3360 |
if (!Universe::heap()->supports_inline_contig_alloc()) { |
14631 | 3361 |
// No allocation in the shared eden. |
21088 | 3362 |
ba(slow_case); |
3363 |
delayed()->nop(); |
|
14631 | 3364 |
} |
3365 |
||
3366 |
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); |
|
3367 |
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1); |
|
3368 |
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2); |
|
3369 |
||
3370 |
// calculate amount of free space |
|
3371 |
sub(t1, top, t1); |
|
3372 |
srl_ptr(t1, LogHeapWordSize, t1); |
|
3373 |
||
3374 |
// Retain tlab and allocate object in shared space if |
|
3375 |
// the amount free in the tlab is too large to discard. |
|
3376 |
cmp(t1, t2); |
|
35470
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3377 |
|
14631 | 3378 |
brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab); |
3379 |
// increment waste limit to prevent getting stuck on this slow path |
|
35470
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3380 |
if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) { |
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3381 |
delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2); |
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3382 |
} else { |
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3383 |
delayed()->nop(); |
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3384 |
// set64 does not use the temp register if the given constant is 32 bit. So |
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3385 |
// we can just use any register; using G0 results in ignoring of the upper 32 bit |
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3386 |
// of that value. |
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3387 |
set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), t3, G0); |
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3388 |
add(t2, t3, t2); |
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3389 |
} |
75c679ad0747
8144573: TLABWasteIncrement=max_jint fires an assert on SPARC for non-G1 GC mode
sangheki
parents:
35214
diff
changeset
|
3390 |
|
14631 | 3391 |
st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset())); |
3392 |
if (TLABStats) { |
|
3393 |
// increment number of slow_allocations |
|
3394 |
ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2); |
|
3395 |
add(t2, 1, t2); |
|
3396 |
stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset())); |
|
3397 |
} |
|
21088 | 3398 |
ba(try_eden); |
3399 |
delayed()->nop(); |
|
14631 | 3400 |
|
3401 |
bind(discard_tlab); |
|
3402 |
if (TLABStats) { |
|
3403 |
// increment number of refills |
|
3404 |
ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2); |
|
3405 |
add(t2, 1, t2); |
|
3406 |
stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset())); |
|
3407 |
// accumulate wastage |
|
3408 |
ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2); |
|
3409 |
add(t2, t1, t2); |
|
3410 |
stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset())); |
|
3411 |
} |
|
3412 |
||
3413 |
// if tlab is currently allocated (top or end != null) then |
|
3414 |
// fill [top, end + alignment_reserve) with array object |
|
3415 |
br_null_short(top, Assembler::pn, do_refill); |
|
3416 |
||
3417 |
set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2); |
|
3418 |
st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word |
|
3419 |
// set klass to intArrayKlass |
|
3420 |
sub(t1, typeArrayOopDesc::header_size(T_INT), t1); |
|
3421 |
add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1); |
|
3422 |
sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1); |
|
3423 |
st(t1, top, arrayOopDesc::length_offset_in_bytes()); |
|
3424 |
set((intptr_t)Universe::intArrayKlassObj_addr(), t2); |
|
3425 |
ld_ptr(t2, 0, t2); |
|
3426 |
// store klass last. concurrent gcs assumes klass length is valid if |
|
3427 |
// klass field is not null. |
|
3428 |
store_klass(t2, top); |
|
3429 |
verify_oop(top); |
|
3430 |
||
3431 |
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1); |
|
3432 |
sub(top, t1, t1); // size of tlab's allocated portion |
|
3433 |
incr_allocated_bytes(t1, t2, t3); |
|
3434 |
||
3435 |
// refill the tlab with an eden allocation |
|
3436 |
bind(do_refill); |
|
3437 |
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1); |
|
3438 |
sll_ptr(t1, LogHeapWordSize, t1); |
|
3439 |
// allocate new tlab, address returned in top |
|
3440 |
eden_allocate(top, t1, 0, t2, t3, slow_case); |
|
3441 |
||
3442 |
st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset())); |
|
3443 |
st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset())); |
|
3444 |
#ifdef ASSERT |
|
3445 |
// check that tlab_size (t1) is still valid |
|
3446 |
{ |
|
3447 |
Label ok; |
|
3448 |
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2); |
|
3449 |
sll_ptr(t2, LogHeapWordSize, t2); |
|
3450 |
cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok); |
|
3451 |
STOP("assert(t1 == tlab_size)"); |
|
3452 |
should_not_reach_here(); |
|
3453 |
||
3454 |
bind(ok); |
|
3455 |
} |
|
3456 |
#endif // ASSERT |
|
3457 |
add(top, t1, top); // t1 is tlab_size |
|
3458 |
sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); |
|
3459 |
st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); |
|
35548
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3460 |
|
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3461 |
if (ZeroTLAB) { |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3462 |
// This is a fast TLAB refill, therefore the GC is not notified of it. |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3463 |
// So compiled code must fill the new TLAB with zeroes. |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3464 |
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3465 |
zero_memory(t2, t1); |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3466 |
} |
14631 | 3467 |
verify_tlab(); |
21088 | 3468 |
ba(retry); |
3469 |
delayed()->nop(); |
|
14631 | 3470 |
} |
3471 |
||
35548
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3472 |
void MacroAssembler::zero_memory(Register base, Register index) { |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3473 |
assert_different_registers(base, index); |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3474 |
Label loop; |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3475 |
bind(loop); |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3476 |
subcc(index, HeapWordSize, index); |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3477 |
brx(Assembler::greaterEqual, true, Assembler::pt, loop); |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3478 |
delayed()->st_ptr(G0, base, index); |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3479 |
} |
8d3afe96ffea
8086053: Address inconsistencies regarding ZeroTLAB
zmajo
parents:
35232
diff
changeset
|
3480 |
|
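// Illustrative sketch only - the refill-vs-retain policy implemented above,
// as C-like pseudocode (descriptive names, not VM APIs):
//
//   free_words = (tlab_end - tlab_top) >> LogHeapWordSize;
//   if (free_words > refill_waste_limit) {
//     refill_waste_limit += increment;     // too much left to throw away:
//     goto try_eden;                       // allocate this object in shared eden
//   }
//   fill_dead_space_with_int_array(tlab_top);   // keep the heap parseable
//   new_tlab = eden_allocate(tlab_size);        // then grab a fresh TLAB and retry
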
void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
                                          Register t1, Register t2) {
  // Bump total bytes allocated by this thread
  assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
  assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
  // v8 support has gone the way of the dodo
  ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
  add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
  stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
}

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::never:                return Assembler::always;
    case Assembler::zero:                 return Assembler::notZero;
    case Assembler::lessEqual:            return Assembler::greater;
    case Assembler::less:                 return Assembler::greaterEqual;
    case Assembler::lessEqualUnsigned:    return Assembler::greaterUnsigned;
    case Assembler::lessUnsigned:         return Assembler::greaterEqualUnsigned;
    case Assembler::negative:             return Assembler::positive;
    case Assembler::overflowSet:          return Assembler::overflowClear;
    case Assembler::always:               return Assembler::never;
    case Assembler::notZero:              return Assembler::zero;
    case Assembler::greater:              return Assembler::lessEqual;
    case Assembler::greaterEqual:         return Assembler::less;
    case Assembler::greaterUnsigned:      return Assembler::lessEqualUnsigned;
    case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
    case Assembler::positive:             return Assembler::negative;
    case Assembler::overflowClear:        return Assembler::overflowSet;
  }

  ShouldNotReachHere(); return Assembler::overflowClear;
}

void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
                              Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  brx(negated_cond, false, Assembler::pt, L);
  delayed()->nop();
  inc_counter(counter_ptr, Rtmp1, Rtmp2);
  bind(L);
}

void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
  AddressLiteral addrlit(counter_addr);
  sethi(addrlit, Rtmp1);                 // Move hi22 bits into temporary register.
  Address addr(Rtmp1, addrlit.low10());  // Build an address with low10 bits.
  ld(addr, Rtmp2);
  inc(Rtmp2);
  st(Rtmp2, addr);
}

void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
  inc_counter((address) counter_addr, Rtmp1, Rtmp2);
}

SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, Register temp, const bool* flag_addr,
    Assembler::Condition condition) {
  _masm = masm;
  AddressLiteral flag(flag_addr);
  _masm->sethi(flag, temp);
  _masm->ldub(temp, flag.low10(), temp);
  _masm->tst(temp);
  _masm->br(condition, false, Assembler::pt, _label);
  _masm->delayed()->nop();
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}

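// Illustrative sketch only - inc_counter() forms the counter address with the
// usual SPARC sethi/lo split: sethi installs the upper 22 bits, the 10-bit
// low immediate supplies the rest. Roughly (descriptive names only):
//
//   tmp1 = counter_addr & ~0x3ff;                         // sethi %hi(addr)
//   tmp2 = *(int*)(tmp1 + (counter_addr & 0x3ff));        // load
//   *(int*)(tmp1 + (counter_addr & 0x3ff)) = tmp2 + 1;    // plain (non-atomic)
//                                                         // store: statistics only
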
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages.  This clobbers tsp and scratch.
void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
                                     Register Rscratch) {
  // Use stack pointer in temp stack pointer
  mov(SP, Rtsp);

  // Bang stack for total size given plus stack shadow page size.
  // Bang one page at a time because a large size can overflow yellow and
  // red zones (the bang will fail but stack overflow handling can't tell that
  // it was a stack overflow bang vs a regular segv).
  int offset = os::vm_page_size();
  Register Roffset = Rscratch;

  Label loop;
  bind(loop);
  set((-offset)+STACK_BIAS, Rscratch);
  st(G0, Rtsp, Rscratch);
  set(offset, Roffset);
  sub(Rsize, Roffset, Rsize);
  cmp(Rsize, G0);
  br(Assembler::greater, false, Assembler::pn, loop);
  delayed()->sub(Rtsp, Roffset, Rtsp);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 1; i < JavaThread::stack_shadow_zone_size() / os::vm_page_size(); i++) {
    set((-i*offset)+STACK_BIAS, Rscratch);
    st(G0, Rtsp, Rscratch);
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch);
  cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling);

  call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread);

  AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry());
  jump_to(stub, G4_scratch);
  delayed()->restore();

  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

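// Illustrative sketch only - the stack bang above touches one word in every
// page from SP down past the requested size plus the shadow zone, roughly
// (descriptive names only):
//
//   for (addr = SP; size > 0; size -= page_size, addr -= page_size)
//     *(addr - page_size + STACK_BIAS) = 0;        // probe each page
//   for (i = 1; i < shadow_zone_size / page_size; i++)
//     *(addr - i * page_size + STACK_BIAS) = 0;    // and the shadow pages
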
14631 | 3610 |
/////////////////////////////////////////////////////////////////////////////////// |
15482
470d0b0c09f1
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
jprovino
parents:
15116
diff
changeset
|
3611 |
#if INCLUDE_ALL_GCS |
14631 | 3612 |
|
3613 |
static address satb_log_enqueue_with_frame = NULL; |
|
3614 |
static u_char* satb_log_enqueue_with_frame_end = NULL; |
|
3615 |
||
3616 |
static address satb_log_enqueue_frameless = NULL; |
|
3617 |
static u_char* satb_log_enqueue_frameless_end = NULL; |
|
3618 |
||
3619 |
static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions? |
|
3620 |
||
3621 |
static void generate_satb_log_enqueue(bool with_frame) { |
|
3622 |
BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize); |
|
3623 |
CodeBuffer buf(bb); |
|
3624 |
MacroAssembler masm(&buf); |
|
3625 |
||
3626 |
#define __ masm. |
|
3627 |
||
3628 |
address start = __ pc(); |
|
3629 |
Register pre_val; |
|
3630 |
||
3631 |
Label refill, restart; |
|
3632 |
if (with_frame) { |
|
3633 |
__ save_frame(0); |
|
3634 |
pre_val = I0; // Was O0 before the save. |
|
3635 |
} else { |
|
3636 |
pre_val = O0; |
|
3637 |
} |
|
3638 |
||
3639 |
int satb_q_index_byte_offset = |
|
3640 |
in_bytes(JavaThread::satb_mark_queue_offset() + |
|
34148
6efbc7ffd767
8143014: Access PtrQueue member offsets through derived classes
kbarrett
parents:
33628
diff
changeset
|
3641 |
SATBMarkQueue::byte_offset_of_index()); |
14631 | 3642 |
|
3643 |
int satb_q_buf_byte_offset = |
|
3644 |
in_bytes(JavaThread::satb_mark_queue_offset() + |
|
34148
6efbc7ffd767
8143014: Access PtrQueue member offsets through derived classes
kbarrett
parents:
33628
diff
changeset
|
3645 |
SATBMarkQueue::byte_offset_of_buf()); |
6efbc7ffd767
8143014: Access PtrQueue member offsets through derived classes
kbarrett
parents:
33628
diff
changeset
|
3646 |
|
6efbc7ffd767
8143014: Access PtrQueue member offsets through derived classes
kbarrett
parents:
33628
diff
changeset
|
3647 |
assert(in_bytes(SATBMarkQueue::byte_width_of_index()) == sizeof(intptr_t) && |
6efbc7ffd767
8143014: Access PtrQueue member offsets through derived classes
kbarrett
parents:
33628
diff
changeset
|
3648 |
in_bytes(SATBMarkQueue::byte_width_of_buf()) == sizeof(intptr_t), |
14631 | 3649 |
"check sizes in assembly below"); |
3650 |
||
3651 |
__ bind(restart); |
|
3652 |
||
34148
6efbc7ffd767
8143014: Access PtrQueue member offsets through derived classes
kbarrett
parents:
33628
diff
changeset
|
3653 |
// Load the index into the SATB buffer. SATBMarkQueue::_index is a size_t |
14631 | 3654 |
// so ld_ptr is appropriate. |
3655 |
__ ld_ptr(G2_thread, satb_q_index_byte_offset, L0); |
|
3656 |
||
3657 |
// index == 0? |
|
3658 |
__ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill); |
|
3659 |
||
3660 |
__ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1); |
|
3661 |
__ sub(L0, oopSize, L0); |
|
3662 |
||
3663 |
__ st_ptr(pre_val, L1, L0); // [_buf + index] := I0 |
|
3664 |
if (!with_frame) { |
|
3665 |
// Use return-from-leaf |
|
3666 |
__ retl(); |
|
3667 |
__ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset); |
|
3668 |
} else { |
|
3669 |
// Not delayed. |
|
3670 |
__ st_ptr(L0, G2_thread, satb_q_index_byte_offset); |
|
3671 |
} |
|
3672 |
if (with_frame) { |
|
3673 |
__ ret(); |
|
3674 |
__ delayed()->restore(); |
|
3675 |
} |
|
3676 |
__ bind(refill); |
|
3677 |
||
3678 |
address handle_zero = |
|
3679 |
CAST_FROM_FN_PTR(address, |
|
3680 |
&SATBMarkQueueSet::handle_zero_index_for_thread); |
|
3681 |
// This should be rare enough that we can afford to save all the |
|
3682 |
// scratch registers that the calling context might be using. |
|
3683 |
__ mov(G1_scratch, L0); |
|
3684 |
__ mov(G3_scratch, L1); |
|
3685 |
__ mov(G4, L2); |
|
3686 |
// We need the value of O0 above (for the write into the buffer), so we |
|
3687 |
// save and restore it. |
|
3688 |
__ mov(O0, L3); |
|
3689 |
// Since the call will overwrite O7, we save and restore that, as well. |
|
3690 |
__ mov(O7, L4); |
|
3691 |
__ call_VM_leaf(L5, handle_zero, G2_thread); |
|
3692 |
__ mov(L0, G1_scratch); |
|
3693 |
__ mov(L1, G3_scratch); |
|
3694 |
__ mov(L2, G4); |
|
3695 |
__ mov(L3, O0); |
|
3696 |
__ br(Assembler::always, /*annul*/false, Assembler::pt, restart); |
|
3697 |
__ delayed()->mov(L4, O7); |
|
3698 |
||
3699 |
if (with_frame) { |
|
3700 |
satb_log_enqueue_with_frame = start; |
|
3701 |
satb_log_enqueue_with_frame_end = __ pc(); |
|
3702 |
} else { |
|
3703 |
satb_log_enqueue_frameless = start; |
|
3704 |
satb_log_enqueue_frameless_end = __ pc(); |
|
3705 |
} |
|
3706 |
||
3707 |
#undef __ |
|
3708 |
} |
|
3709 |
||
3710 |
static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) { |
|
3711 |
if (with_frame) { |
|
3712 |
if (satb_log_enqueue_with_frame == 0) { |
|
3713 |
generate_satb_log_enqueue(with_frame); |
|
3714 |
assert(satb_log_enqueue_with_frame != 0, "postcondition."); |
|
3715 |
} |
|
3716 |
} else { |
|
3717 |
if (satb_log_enqueue_frameless == 0) { |
|
3718 |
generate_satb_log_enqueue(with_frame); |
|
3719 |
assert(satb_log_enqueue_frameless != 0, "postcondition."); |
|
3720 |
} |
|
3721 |
} |
|
3722 |
} |
|
3723 |
||
3724 |
void MacroAssembler::g1_write_barrier_pre(Register obj,
                                          Register index,
                                          int offset,
                                          Register pre_val,
                                          Register tmp,
                                          bool preserve_o_regs) {
  Label filtered;

  if (obj == noreg) {
    // We are not loading the previous value so make
    // sure that we don't trash the value in pre_val
    // with the code below.
    assert_different_registers(pre_val, tmp);
  } else {
    // We will be loading the previous value
    // in this code so...
    assert(offset == 0 || index == noreg, "choose one");
    assert(pre_val == noreg, "check this code");
  }

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    ld(G2,
       in_bytes(JavaThread::satb_mark_queue_offset() +
                SATBMarkQueue::byte_offset_of_active()),
       tmp);
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    ldsb(G2,
         in_bytes(JavaThread::satb_mark_queue_offset() +
                  SATBMarkQueue::byte_offset_of_active()),
         tmp);
  }

  // Is marking active?
  cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);

  // Do we need to load the previous value?
  if (obj != noreg) {
    // Load the previous value...
    if (index == noreg) {
      if (Assembler::is_simm13(offset)) {
        load_heap_oop(obj, offset, tmp);
      } else {
        set(offset, tmp);
        load_heap_oop(obj, tmp, tmp);
      }
    } else {
      load_heap_oop(obj, index, tmp);
    }
    // Previous value has been loaded into tmp
    pre_val = tmp;
  }

  assert(pre_val != noreg, "must have a real register");

  // Is the previous value null?
  cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);

  // OK, it's not filtered, so we'll need to call enqueue.  In the normal
  // case, pre_val will be a scratch G-reg, but there are some cases in
  // which it's an O-reg.  In the first case, do a normal call.  In the
  // latter, do a save here and call the frameless version.

  guarantee(pre_val->is_global() || pre_val->is_out(),
            "Or we need to think harder.");

  if (pre_val->is_global() && !preserve_o_regs) {
    generate_satb_log_enqueue_if_necessary(true); // with frame

    call(satb_log_enqueue_with_frame);
    delayed()->mov(pre_val, O0);
  } else {
    generate_satb_log_enqueue_if_necessary(false); // frameless

    save_frame(0);
    call(satb_log_enqueue_frameless);
    delayed()->mov(pre_val->after_save(), O0);
    restore();
  }

  bind(filtered);
}
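
// [Illustrative summary, not part of the original sources.] In pseudo-C++ the
// pre-barrier emitted above is the standard G1/SATB filter-then-log sequence;
// the field names and the satb_enqueue() helper below are hypothetical
// stand-ins for the stub calls generated above:
//
//   if (thread->satb_mark_queue().is_active()) {       // concurrent marking running?
//     oop pre_val = (obj == NULL) ? pre_val : *field;   // previous value of the field
//     if (pre_val != NULL) {
//       satb_enqueue(thread, pre_val);                  // log it for the concurrent marker
//     }
//   }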
|
3808 |
||
3809 |
static address dirty_card_log_enqueue = 0;
static u_char* dirty_card_log_enqueue_end = 0;

// This gets to assume that o0 contains the object address.
static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
  BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
  CodeBuffer buf(bb);
  MacroAssembler masm(&buf);
#define __ masm.
  address start = __ pc();

  Label not_already_dirty, restart, refill, young_card;

#ifdef _LP64
  __ srlx(O0, CardTableModRefBS::card_shift, O0);
#else
  __ srl(O0, CardTableModRefBS::card_shift, O0);
#endif
  AddressLiteral addrlit(byte_map_base);
  __ set(addrlit, O1); // O1 := <card table base>
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);

  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  __ ldub(O0, O1, O2); // O2 := [O0 + O1]

  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
  __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

  __ bind(young_card);
  // We didn't take the branch, so we're already dirty: return.
  // Use return-from-leaf
  __ retl();
  __ delayed()->nop();

  // Not dirty.
  __ bind(not_already_dirty);

  // Get O0 + O1 into a reg by itself
  __ add(O0, O1, O3);

  // First, dirty it.
  __ stb(G0, O3, G0);  // [cardPtr] := 0  (i.e., dirty).

  int dirty_card_q_index_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             DirtyCardQueue::byte_offset_of_index());
  int dirty_card_q_buf_byte_offset =
    in_bytes(JavaThread::dirty_card_queue_offset() +
             DirtyCardQueue::byte_offset_of_buf());
  __ bind(restart);

  // Load the index into the update buffer. DirtyCardQueue::_index is
  // a size_t so ld_ptr is appropriate here.
  __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);

  // index == 0?
  __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);

  __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
  __ sub(L0, oopSize, L0);

  __ st_ptr(O3, L1, L0);  // [_buf + index] := I0
  // Use return-from-leaf
  __ retl();
  __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);

  __ bind(refill);
  address handle_zero =
    CAST_FROM_FN_PTR(address,
                     &DirtyCardQueueSet::handle_zero_index_for_thread);
  // This should be rare enough that we can afford to save all the
  // scratch registers that the calling context might be using.
  __ mov(G1_scratch, L3);
  __ mov(G3_scratch, L5);
  // We need the value of O3 above (for the write into the buffer), so we
  // save and restore it.
  __ mov(O3, L6);
  // Since the call will overwrite O7, we save and restore that, as well.
  __ mov(O7, L4);

  __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
  __ mov(L3, G1_scratch);
  __ mov(L5, G3_scratch);
  __ mov(L6, O3);
  __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
  __ delayed()->mov(L4, O7);

  dirty_card_log_enqueue = start;
  dirty_card_log_enqueue_end = __ pc();
  // XXX Should have a guarantee here about not going off the end!
  // Does it already do so?  Do an experiment...

#undef __

}
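
// [Illustrative sketch, not part of the original sources.] The stub above uses the
// usual PtrQueue protocol: a byte index counts down from the buffer size and a
// refill routine is called once it reaches zero. A self-contained C++ sketch of
// the enqueue step (function and parameter names are hypothetical; assumes
// <cstddef> for size_t):
static inline bool example_try_enqueue(void** buf, size_t* index_in_bytes, void* entry) {
  if (*index_in_bytes == 0) {
    return false;                                // queue full: caller refills, then retries
  }
  *index_in_bytes -= sizeof(void*);              // the index is kept in bytes, not slots
  buf[*index_in_bytes / sizeof(void*)] = entry;  // [_buf + index] := entry
  return true;
}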
|
3906 |
||
3907 |
static inline void |
|
3908 |
generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) { |
|
3909 |
if (dirty_card_log_enqueue == 0) { |
|
3910 |
generate_dirty_card_log_enqueue(byte_map_base); |
|
3911 |
assert(dirty_card_log_enqueue != 0, "postcondition."); |
|
3912 |
} |
|
3913 |
} |
|
3914 |
||
3915 |
||
3916 |
void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) { |
|
3917 |
||
3918 |
Label filtered; |
|
3919 |
MacroAssembler* post_filter_masm = this; |
|
3920 |
||
3921 |
if (new_val == G0) return; |
|
3922 |
||
29325 | 3923 |
G1SATBCardTableLoggingModRefBS* bs = |
3924 |
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set()); |
|
14631 | 3925 |
|
3926 |
if (G1RSBarrierRegionFilter) { |
|
3927 |
xor3(store_addr, new_val, tmp); |
|
3928 |
#ifdef _LP64 |
|
3929 |
srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp); |
|
3930 |
#else |
|
3931 |
srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp); |
|
3932 |
#endif |
|
3933 |
||
3934 |
// XXX Should I predict this taken or not? Does it matter? |
|
3935 |
cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); |
|
3936 |
} |
|
3937 |
||
3938 |
// If the "store_addr" register is an "in" or "local" register, move it to |
|
3939 |
// a scratch reg so we can pass it as an argument. |
|
3940 |
bool use_scr = !(store_addr->is_global() || store_addr->is_out()); |
|
3941 |
// Pick a scratch register different from "tmp". |
|
3942 |
Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch); |
|
3943 |
// Make sure we use up the delay slot! |
|
3944 |
if (use_scr) { |
|
3945 |
post_filter_masm->mov(store_addr, scr); |
|
3946 |
} else { |
|
3947 |
post_filter_masm->nop(); |
|
3948 |
} |
|
3949 |
generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base); |
|
3950 |
save_frame(0); |
|
3951 |
call(dirty_card_log_enqueue); |
|
3952 |
if (use_scr) { |
|
3953 |
delayed()->mov(scr, O0); |
|
3954 |
} else { |
|
3955 |
delayed()->mov(store_addr->after_save(), O0); |
|
3956 |
} |
|
3957 |
restore(); |
|
3958 |
||
3959 |
bind(filtered); |
|
3960 |
} |
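
// [Illustrative sketch, not part of the original sources.] The G1RSBarrierRegionFilter
// test above skips the card enqueue when source and destination lie in the same heap
// region: xor-ing the two addresses and shifting right by log2(region size) yields zero
// exactly in that case. Equivalent C++ (hypothetical helper; assumes <stdint.h>):
static inline bool example_crosses_region(uintptr_t store_addr, uintptr_t new_val,
                                          int log_of_hr_grain_bytes) {
  return ((store_addr ^ new_val) >> log_of_hr_grain_bytes) != 0;
}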
|
3961 |
||
#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
  // If we're writing constant NULL, we can skip the write barrier.
  if (new_val == G0) return;
  CardTableModRefBS* bs =
    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
  assert(bs->kind() == BarrierSet::CardTableForRS ||
         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
  card_table_write(bs->byte_map_base, tmp, store_addr);
}
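
// [Illustrative sketch, not part of the original sources.] card_table_write() above is
// the classic card-table post barrier: index the byte map with the store address shifted
// by the card shift and store the dirty value (0). In C++ terms (hypothetical helper;
// card_shift is log2 of the card size, typically 9 for 512-byte cards; assumes <stdint.h>):
static inline void example_card_table_post_barrier(signed char* byte_map_base,
                                                   uintptr_t store_addr, int card_shift) {
  byte_map_base[store_addr >> card_shift] = 0;  // 0 == dirty_card_val()
}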
|
3974 |
||
void MacroAssembler::load_mirror(Register mirror, Register method) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ld_ptr(method, in_bytes(Method::const_offset()), mirror);
  ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
  ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
  ld_ptr(mirror, mirror_offset, mirror);
}
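
// [Illustrative note, not part of the original sources.] The four loads above walk the
// metadata chain  Method* -> ConstMethod* -> ConstantPool* -> pool holder Klass* ->
// java mirror (the java.lang.Class oop); in accessor terms, roughly:
//   oop mirror = method->constMethod()->constants()->pool_holder()->java_mirror();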
14631 | 3983 |
void MacroAssembler::load_klass(Register src_oop, Register klass) { |
3984 |
// The number of bytes in this code is used by |
|
3985 |
// MachCallDynamicJavaNode::ret_addr_offset() |
|
3986 |
// if this changes, change that. |
|
3987 |
if (UseCompressedClassPointers) { |
14631 | 3988 |
lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass); |
3989 |
decode_klass_not_null(klass); |
|
3990 |
} else { |
|
3991 |
ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass); |
|
3992 |
} |
|
3993 |
} |
|
3994 |
||
3995 |
void MacroAssembler::store_klass(Register klass, Register dst_oop) { |
|
3996 |
if (UseCompressedClassPointers) { |
14631 | 3997 |
assert(dst_oop != klass, "not enough registers"); |
3998 |
encode_klass_not_null(klass); |
|
3999 |
st(klass, dst_oop, oopDesc::klass_offset_in_bytes()); |
|
4000 |
} else { |
|
4001 |
st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes()); |
|
4002 |
} |
|
4003 |
} |
|
4004 |
||
4005 |
void MacroAssembler::store_klass_gap(Register s, Register d) { |
|
4006 |
if (UseCompressedClassPointers) { |
14631 | 4007 |
assert(s != d, "not enough registers"); |
4008 |
st(s, d, oopDesc::klass_gap_offset_in_bytes()); |
|
4009 |
} |
|
4010 |
} |
|
4011 |
||
4012 |
void MacroAssembler::load_heap_oop(const Address& s, Register d) { |
|
4013 |
if (UseCompressedOops) { |
|
4014 |
lduw(s, d); |
|
4015 |
decode_heap_oop(d); |
|
4016 |
} else { |
|
4017 |
ld_ptr(s, d); |
|
4018 |
} |
|
4019 |
} |
|
4020 |
||
4021 |
void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) { |
|
4022 |
if (UseCompressedOops) { |
|
4023 |
lduw(s1, s2, d); |
|
4024 |
decode_heap_oop(d, d); |
|
4025 |
} else { |
|
4026 |
ld_ptr(s1, s2, d); |
|
4027 |
} |
|
4028 |
} |
|
4029 |
||
4030 |
void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) { |
|
4031 |
if (UseCompressedOops) { |
|
4032 |
lduw(s1, simm13a, d); |
|
4033 |
decode_heap_oop(d, d); |
|
4034 |
} else { |
|
4035 |
ld_ptr(s1, simm13a, d); |
|
4036 |
} |
|
4037 |
} |
|
4038 |
||
4039 |
void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) { |
|
4040 |
if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d); |
|
4041 |
else load_heap_oop(s1, s2.as_register(), d); |
|
4042 |
} |
|
4043 |
||
4044 |
void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) { |
|
4045 |
if (UseCompressedOops) { |
|
4046 |
assert(s1 != d && s2 != d, "not enough registers"); |
|
4047 |
encode_heap_oop(d); |
|
4048 |
st(d, s1, s2); |
|
4049 |
} else { |
|
4050 |
st_ptr(d, s1, s2); |
|
4051 |
} |
|
4052 |
} |
|
4053 |
||
4054 |
void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) { |
|
4055 |
if (UseCompressedOops) { |
|
4056 |
assert(s1 != d, "not enough registers"); |
|
4057 |
encode_heap_oop(d); |
|
4058 |
st(d, s1, simm13a); |
|
4059 |
} else { |
|
4060 |
st_ptr(d, s1, simm13a); |
|
4061 |
} |
|
4062 |
} |
|
4063 |
||
4064 |
void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) { |
|
4065 |
if (UseCompressedOops) { |
|
4066 |
assert(a.base() != d, "not enough registers"); |
|
4067 |
encode_heap_oop(d); |
|
4068 |
st(d, a, offset); |
|
4069 |
} else { |
|
4070 |
st_ptr(d, a, offset); |
|
4071 |
} |
|
4072 |
} |
|
4073 |
||
4074 |
||
4075 |
void MacroAssembler::encode_heap_oop(Register src, Register dst) { |
|
4076 |
assert (UseCompressedOops, "must be compressed"); |
|
4077 |
assert (Universe::heap() != NULL, "java heap should be initialized"); |
|
4078 |
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
|
4079 |
verify_oop(src); |
|
4080 |
if (Universe::narrow_oop_base() == NULL) { |
|
4081 |
srlx(src, LogMinObjAlignmentInBytes, dst); |
|
4082 |
return; |
|
4083 |
} |
|
4084 |
Label done; |
|
4085 |
if (src == dst) { |
|
4086 |
// optimize for frequent case src == dst |
|
4087 |
bpr(rc_nz, true, Assembler::pt, src, done); |
|
4088 |
delayed() -> sub(src, G6_heapbase, dst); // annulled if not taken |
|
4089 |
bind(done); |
|
4090 |
srlx(src, LogMinObjAlignmentInBytes, dst); |
|
4091 |
} else { |
|
4092 |
bpr(rc_z, false, Assembler::pn, src, done); |
|
4093 |
delayed() -> mov(G0, dst); |
|
4094 |
// could be moved before the branch, and the delay slot annulled, |
|
4095 |
// but may add some unneeded work decoding null |
|
4096 |
sub(src, G6_heapbase, dst); |
|
4097 |
srlx(dst, LogMinObjAlignmentInBytes, dst); |
|
4098 |
bind(done); |
|
4099 |
} |
|
4100 |
} |
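
// [Illustrative sketch, not part of the original sources.] With a non-NULL narrow oop
// base, the encoding above computes  narrow = (oop - heap_base) >> LogMinObjAlignmentInBytes
// and maps NULL to 0; with a NULL base it is a plain shift. C++ equivalent
// (hypothetical helper; assumes <stdint.h>):
static inline uint32_t example_encode_narrow_oop(uintptr_t obj, uintptr_t heap_base, int shift) {
  return (obj == 0) ? 0 : (uint32_t)((obj - heap_base) >> shift);
}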
|
4101 |
||
4102 |
||
4103 |
void MacroAssembler::encode_heap_oop_not_null(Register r) { |
|
4104 |
assert (UseCompressedOops, "must be compressed"); |
|
4105 |
assert (Universe::heap() != NULL, "java heap should be initialized"); |
|
4106 |
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
|
4107 |
verify_oop(r); |
|
4108 |
if (Universe::narrow_oop_base() != NULL) |
|
4109 |
sub(r, G6_heapbase, r); |
|
4110 |
srlx(r, LogMinObjAlignmentInBytes, r); |
|
4111 |
} |
|
4112 |
||
4113 |
void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) { |
|
4114 |
assert (UseCompressedOops, "must be compressed"); |
|
4115 |
assert (Universe::heap() != NULL, "java heap should be initialized"); |
|
4116 |
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
|
4117 |
verify_oop(src); |
|
4118 |
if (Universe::narrow_oop_base() == NULL) { |
|
4119 |
srlx(src, LogMinObjAlignmentInBytes, dst); |
|
4120 |
} else { |
|
4121 |
sub(src, G6_heapbase, dst); |
|
4122 |
srlx(dst, LogMinObjAlignmentInBytes, dst); |
|
4123 |
} |
|
4124 |
} |
|
4125 |
||
4126 |
// Same algorithm as oops.inline.hpp decode_heap_oop. |
|
4127 |
void MacroAssembler::decode_heap_oop(Register src, Register dst) { |
|
4128 |
assert (UseCompressedOops, "must be compressed"); |
|
4129 |
assert (Universe::heap() != NULL, "java heap should be initialized"); |
|
4130 |
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
|
4131 |
sllx(src, LogMinObjAlignmentInBytes, dst); |
|
4132 |
if (Universe::narrow_oop_base() != NULL) { |
|
4133 |
Label done; |
|
4134 |
bpr(rc_nz, true, Assembler::pt, dst, done); |
|
4135 |
delayed() -> add(dst, G6_heapbase, dst); // annulled if not taken |
|
4136 |
bind(done); |
|
4137 |
} |
|
4138 |
verify_oop(dst); |
|
4139 |
} |
|
4140 |
||
4141 |
void MacroAssembler::decode_heap_oop_not_null(Register r) { |
|
4142 |
// Do not add assert code to this unless you change vtableStubs_sparc.cpp |
|
4143 |
// pd_code_size_limit. |
|
4144 |
// Also do not verify_oop as this is called by verify_oop. |
|
4145 |
assert (UseCompressedOops, "must be compressed"); |
|
4146 |
assert (Universe::heap() != NULL, "java heap should be initialized"); |
|
4147 |
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
|
4148 |
sllx(r, LogMinObjAlignmentInBytes, r); |
|
4149 |
if (Universe::narrow_oop_base() != NULL) |
|
4150 |
add(r, G6_heapbase, r); |
|
4151 |
} |
|
4152 |
||
4153 |
void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) { |
|
4154 |
// Do not add assert code to this unless you change vtableStubs_sparc.cpp |
|
4155 |
// pd_code_size_limit. |
|
4156 |
// Also do not verify_oop as this is called by verify_oop. |
|
4157 |
assert (UseCompressedOops, "must be compressed"); |
|
4158 |
assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); |
|
4159 |
sllx(src, LogMinObjAlignmentInBytes, dst); |
|
4160 |
if (Universe::narrow_oop_base() != NULL) |
|
4161 |
add(dst, G6_heapbase, dst); |
|
4162 |
} |
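
// [Illustrative sketch, not part of the original sources.] Decoding reverses the
// transformation: shift the narrow value left and, when a heap base is in use, add it
// back; decode_heap_oop() keeps NULL mapped to NULL, while the _not_null variants can
// skip that check. C++ equivalent of the null-preserving form (hypothetical helper;
// assumes <stdint.h>):
static inline uintptr_t example_decode_narrow_oop(uint32_t narrow, uintptr_t heap_base, int shift) {
  return (narrow == 0) ? 0 : heap_base + ((uintptr_t)narrow << shift);
}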
|
4163 |
||
4164 |
void MacroAssembler::encode_klass_not_null(Register r) { |
|
19979
ebe1dbb6e1aa
8015107: NPG: Use consistent naming for metaspace concepts
ehelin
parents:
19319
diff
changeset
|
4165 |
assert (UseCompressedClassPointers, "must be compressed"); |
21188
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4166 |
if (Universe::narrow_klass_base() != NULL) { |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4167 |
assert(r != G6_heapbase, "bad register choice"); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4168 |
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4169 |
sub(r, G6_heapbase, r); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4170 |
if (Universe::narrow_klass_shift() != 0) { |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4171 |
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4172 |
srlx(r, LogKlassAlignmentInBytes, r); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4173 |
} |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4174 |
reinit_heapbase(); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4175 |
} else { |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4176 |
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4177 |
srlx(r, Universe::narrow_klass_shift(), r); |
19319
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18446
diff
changeset
|
4178 |
} |
14631 | 4179 |
} |
4180 |
||
4181 |
void MacroAssembler::encode_klass_not_null(Register src, Register dst) { |
|
19319
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18446
diff
changeset
|
4182 |
if (src == dst) { |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18446
diff
changeset
|
4183 |
encode_klass_not_null(src); |
14631 | 4184 |
} else { |
19979
ebe1dbb6e1aa
8015107: NPG: Use consistent naming for metaspace concepts
ehelin
parents:
19319
diff
changeset
|
4185 |
assert (UseCompressedClassPointers, "must be compressed"); |
21188
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4186 |
if (Universe::narrow_klass_base() != NULL) { |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4187 |
set((intptr_t)Universe::narrow_klass_base(), dst); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4188 |
sub(src, dst, dst); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4189 |
if (Universe::narrow_klass_shift() != 0) { |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4190 |
srlx(dst, LogKlassAlignmentInBytes, dst); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4191 |
} |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4192 |
} else { |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4193 |
// shift src into dst |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4194 |
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); |
d053e4e8f901
8024927: Nashorn performance regression with CompressedOops
coleenp
parents:
20403
diff
changeset
|
4195 |
srlx(src, Universe::narrow_klass_shift(), dst); |
19319
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18446
diff
changeset
|
4196 |
} |
14631 | 4197 |
} |
4198 |
} |
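
// [Illustrative sketch, not part of the original sources.] Narrow klass pointers follow
// the same scheme with their own base and shift, either of which may be zero:
// narrow = (klass - narrow_klass_base) >> narrow_klass_shift. C++ equivalent
// (hypothetical helper; assumes <stdint.h>):
static inline uint32_t example_encode_narrow_klass(uintptr_t klass, uintptr_t base, int shift) {
  return (uint32_t)((klass - base) >> shift);  // base == 0 and/or shift == 0 are the degenerate cases
}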
|
// Function instr_size_for_decode_klass_not_null() counts the instructions
// generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
// the instructions they generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
  int num_instrs = 1;  // shift src,dst or add
  if (Universe::narrow_klass_base() != NULL) {
    // set + add + set
    num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) +
                  insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
    if (Universe::narrow_klass_shift() != 0) {
      num_instrs += 1;  // sllx
    }
  }
  return num_instrs * BytesPerInstWord;
}

// !!! If the instructions that get generated here change then function
// instr_size_for_decode_klass_not_null() needs to get updated.
void MacroAssembler::decode_klass_not_null(Register r) {
  // Do not add assert code to this unless you change vtableStubs_sparc.cpp
  // pd_code_size_limit.
  assert (UseCompressedClassPointers, "must be compressed");
  if (Universe::narrow_klass_base() != NULL) {
    assert(r != G6_heapbase, "bad register choice");
    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    if (Universe::narrow_klass_shift() != 0)
      sllx(r, LogKlassAlignmentInBytes, r);
    add(r, G6_heapbase, r);
    reinit_heapbase();
  } else {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
    sllx(r, Universe::narrow_klass_shift(), r);
  }
}

void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
  if (src == dst) {
    decode_klass_not_null(src);
  } else {
    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
    // pd_code_size_limit.
    assert (UseCompressedClassPointers, "must be compressed");
    if (Universe::narrow_klass_base() != NULL) {
      if (Universe::narrow_klass_shift() != 0) {
        assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
        set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
        sllx(src, LogKlassAlignmentInBytes, dst);
        add(dst, G6_heapbase, dst);
        reinit_heapbase();
      } else {
        set((intptr_t)Universe::narrow_klass_base(), dst);
        add(src, dst, dst);
      }
    } else {
      // shift/mov src into dst.
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong");
      sllx(src, Universe::narrow_klass_shift(), dst);
    }
  }
}

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops || UseCompressedClassPointers) {
    if (Universe::heap() != NULL) {
      set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
    } else {
      AddressLiteral base(Universe::narrow_ptrs_base_addr());
      load_ptr_contents(base, G6_heapbase);
    }
  }
}
|
4272 |
||
33628 | 4273 |
#ifdef COMPILER2 |
4274 |
||
4275 |
// Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure. |
|
4276 |
void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, Register result, |
|
4277 |
Register tmp1, Register tmp2, Register tmp3, Register tmp4, |
|
4278 |
FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, Label& Ldone) { |
|
4279 |
Label Lloop, Lslow; |
|
4280 |
assert(UseVIS >= 3, "VIS3 is required"); |
|
4281 |
assert_different_registers(src, dst, cnt, tmp1, tmp2, tmp3, tmp4, result); |
|
4282 |
assert_different_registers(ftmp1, ftmp2, ftmp3); |
|
4283 |
||
4284 |
// Check if cnt >= 8 (= 16 bytes) |
|
4285 |
cmp(cnt, 8); |
|
4286 |
br(Assembler::less, false, Assembler::pn, Lslow); |
|
4287 |
delayed()->mov(cnt, result); // copy count |
|
4288 |
||
4289 |
// Check for 8-byte alignment of src and dst |
|
4290 |
or3(src, dst, tmp1); |
|
4291 |
andcc(tmp1, 7, G0); |
|
4292 |
br(Assembler::notZero, false, Assembler::pn, Lslow); |
|
4293 |
delayed()->nop(); |
|
4294 |
||
4295 |
// Set mask for bshuffle instruction |
|
4296 |
Register mask = tmp4; |
|
4297 |
set(0x13579bdf, mask); |
|
4298 |
bmask(mask, G0, G0); |
|
4299 |
||
4300 |
// Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters |
|
4301 |
Assembler::sethi(0xff00fc00, mask); // mask = 0x0000 0000 ff00 fc00 |
|
4302 |
add(mask, 0x300, mask); // mask = 0x0000 0000 ff00 ff00 |
|
4303 |
sllx(mask, 32, tmp1); // tmp1 = 0xff00 ff00 0000 0000 |
|
4304 |
or3(mask, tmp1, mask); // mask = 0xff00 ff00 ff00 ff00 |
|
4305 |
||
4306 |
// Load first 8 bytes |
|
4307 |
ldx(src, 0, tmp1); |
|
4308 |
||
4309 |
bind(Lloop); |
|
4310 |
// Load next 8 bytes |
|
4311 |
ldx(src, 8, tmp2); |
|
4312 |
||
4313 |
// Check for non-latin1 character by testing if the most significant byte of a char is set. |
|
4314 |
// Although we have to move the data between integer and floating point registers, this is |
|
4315 |
// still faster than the corresponding VIS instructions (ford/fand/fcmpd). |
|
4316 |
or3(tmp1, tmp2, tmp3); |
|
4317 |
btst(tmp3, mask); |
|
4318 |
// annul zeroing if branch is not taken to preserve original count |
|
4319 |
brx(Assembler::notZero, true, Assembler::pn, Ldone); |
|
4320 |
delayed()->mov(G0, result); // 0 - failed |
|
4321 |
||
4322 |
// Move bytes into float register |
|
4323 |
movxtod(tmp1, ftmp1); |
|
4324 |
movxtod(tmp2, ftmp2); |
|
4325 |
||
4326 |
// Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3 |
|
4327 |
bshuffle(ftmp1, ftmp2, ftmp3); |
|
4328 |
stf(FloatRegisterImpl::D, ftmp3, dst, 0); |
|
4329 |
||
4330 |
// Increment addresses and decrement count |
|
4331 |
inc(src, 16); |
|
4332 |
inc(dst, 8); |
|
4333 |
dec(cnt, 8); |
|
4334 |
||
4335 |
cmp(cnt, 8); |
|
4336 |
// annul LDX if branch is not taken to prevent access past end of string |
|
4337 |
br(Assembler::greaterEqual, true, Assembler::pt, Lloop); |
|
4338 |
delayed()->ldx(src, 0, tmp1); |
|
4339 |
||
4340 |
// Fallback to slow version |
|
4341 |
bind(Lslow); |
|
4342 |
} |
|
4343 |
||
4344 |
// Compress char[] to byte[]. Return 0 on failure. |
|
4345 |
void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register result, Register tmp, Label& Ldone) { |
|
4346 |
Label Lloop; |
|
4347 |
assert_different_registers(src, dst, cnt, tmp, result); |
|
4348 |
||
4349 |
lduh(src, 0, tmp); |
|
4350 |
||
4351 |
bind(Lloop); |
|
4352 |
inc(src, sizeof(jchar)); |
|
4353 |
cmp(tmp, 0xff); |
|
4354 |
// annul zeroing if branch is not taken to preserve original count |
|
4355 |
br(Assembler::greater, true, Assembler::pn, Ldone); // don't check xcc |
|
4356 |
delayed()->mov(G0, result); // 0 - failed |
|
4357 |
deccc(cnt); |
|
4358 |
stb(tmp, dst, 0); |
|
4359 |
inc(dst); |
|
4360 |
// annul LDUH if branch is not taken to prevent access past end of string |
|
4361 |
br(Assembler::notZero, true, Assembler::pt, Lloop); |
|
4362 |
delayed()->lduh(src, 0, tmp); // hoisted |
|
4363 |
} |
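
// [Illustrative sketch, not part of the original sources.] Both compress stubs implement
// the same contract: copy the low byte of every UTF-16 char, but give up (result 0) as
// soon as a char has a non-zero high byte, i.e. is not latin-1. Scalar C++ equivalent
// (hypothetical helper; assumes <stdint.h> and <cstddef>):
static inline size_t example_compress_chars(const uint16_t* src, uint8_t* dst, size_t cnt) {
  for (size_t i = 0; i < cnt; i++) {
    if (src[i] > 0xff) return 0;   // non-latin1 character: report failure
    dst[i] = (uint8_t)src[i];
  }
  return cnt;                      // success: the original count
}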
|
4364 |
||
4365 |
// Inflate byte[] to char[] by inflating 16 bytes at once. |
|
4366 |
void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, Register tmp, |
|
4367 |
FloatRegister ftmp1, FloatRegister ftmp2, FloatRegister ftmp3, FloatRegister ftmp4, Label& Ldone) { |
|
4368 |
Label Lloop, Lslow; |
|
4369 |
assert(UseVIS >= 3, "VIS3 is required"); |
|
4370 |
assert_different_registers(src, dst, cnt, tmp); |
|
4371 |
assert_different_registers(ftmp1, ftmp2, ftmp3, ftmp4); |
|
4372 |
||
4373 |
// Check if cnt >= 8 (= 16 bytes) |
|
4374 |
cmp(cnt, 8); |
|
4375 |
br(Assembler::less, false, Assembler::pn, Lslow); |
|
4376 |
delayed()->nop(); |
|
4377 |
||
4378 |
// Check for 8-byte alignment of src and dst |
|
4379 |
or3(src, dst, tmp); |
|
4380 |
andcc(tmp, 7, G0); |
|
4381 |
br(Assembler::notZero, false, Assembler::pn, Lslow); |
|
4382 |
// Initialize float register to zero |
|
4383 |
FloatRegister zerof = ftmp4; |
|
4384 |
delayed()->fzero(FloatRegisterImpl::D, zerof); |
|
4385 |
||
4386 |
// Load first 8 bytes |
|
4387 |
ldf(FloatRegisterImpl::D, src, 0, ftmp1); |
|
4388 |
||
4389 |
bind(Lloop); |
|
4390 |
inc(src, 8); |
|
4391 |
dec(cnt, 8); |
|
4392 |
||
4393 |
// Inflate the string by interleaving each byte from the source array |
|
4394 |
// with a zero byte and storing the result in the destination array. |
|
4395 |
fpmerge(zerof, ftmp1->successor(), ftmp2); |
|
4396 |
stf(FloatRegisterImpl::D, ftmp2, dst, 8); |
|
4397 |
fpmerge(zerof, ftmp1, ftmp3); |
|
4398 |
stf(FloatRegisterImpl::D, ftmp3, dst, 0); |
|
4399 |
||
4400 |
inc(dst, 16); |
|
4401 |
||
4402 |
cmp(cnt, 8); |
|
4403 |
// annul LDX if branch is not taken to prevent access past end of string |
|
4404 |
br(Assembler::greaterEqual, true, Assembler::pt, Lloop); |
|
4405 |
delayed()->ldf(FloatRegisterImpl::D, src, 0, ftmp1); |
|
4406 |
||
4407 |
// Fallback to slow version |
|
4408 |
bind(Lslow); |
|
4409 |
} |
|
4410 |
||
4411 |
// Inflate byte[] to char[]. |
|
4412 |
void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp, Label& Ldone) { |
|
4413 |
Label Loop; |
|
4414 |
assert_different_registers(src, dst, cnt, tmp); |
|
4415 |
||
4416 |
ldub(src, 0, tmp); |
|
4417 |
bind(Loop); |
|
4418 |
inc(src); |
|
4419 |
deccc(cnt); |
|
4420 |
sth(tmp, dst, 0); |
|
4421 |
inc(dst, sizeof(jchar)); |
|
4422 |
// annul LDUB if branch is not taken to prevent access past end of string |
|
4423 |
br(Assembler::notZero, true, Assembler::pt, Loop); |
|
4424 |
delayed()->ldub(src, 0, tmp); // hoisted |
|
4425 |
} |
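
// [Illustrative sketch, not part of the original sources.] Inflation is the lossless
// inverse: every byte becomes a char with a zero high byte, which is what the fpmerge
// with a zero register achieves eight bytes at a time above. Scalar C++ equivalent
// (hypothetical helper; assumes <stdint.h> and <cstddef>):
static inline void example_inflate_bytes(const uint8_t* src, uint16_t* dst, size_t cnt) {
  for (size_t i = 0; i < cnt; i++) {
    dst[i] = src[i];               // zero-extend each latin-1 byte to a UTF-16 char
  }
}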
|
4426 |
||
4427 |
void MacroAssembler::string_compare(Register str1, Register str2, |
|
4428 |
Register cnt1, Register cnt2, |
|
4429 |
Register tmp1, Register tmp2, |
|
4430 |
Register result, int ae) { |
|
4431 |
Label Ldone, Lloop; |
|
4432 |
assert_different_registers(str1, str2, cnt1, cnt2, tmp1, result); |
|
4433 |
int stride1, stride2; |
|
4434 |
||
4435 |
// Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a) |
|
4436 |
// we interchange str1 and str2 in the UL case and negate the result. |
|
4437 |
// This way, str1 is always latin1 encoded, except for the UU case. |
|
4438 |
||
4439 |
if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { |
|
4440 |
srl(cnt2, 1, cnt2); |
|
4441 |
} |
|
4442 |
||
4443 |
// See if the lengths are different, and calculate min in cnt1. |
|
4444 |
// Save diff in case we need it for a tie-breaker. |
|
4445 |
Label Lskip; |
|
4446 |
Register diff = tmp1; |
|
4447 |
subcc(cnt1, cnt2, diff); |
|
4448 |
br(Assembler::greater, true, Assembler::pt, Lskip); |
|
4449 |
// cnt2 is shorter, so use its count: |
|
4450 |
delayed()->mov(cnt2, cnt1); |
|
4451 |
bind(Lskip); |
|
4452 |
||
4453 |
// Rename registers |
|
4454 |
Register limit1 = cnt1; |
|
4455 |
Register limit2 = limit1; |
|
4456 |
Register chr1 = result; |
|
4457 |
Register chr2 = cnt2; |
|
4458 |
if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { |
|
4459 |
// We need an additional register to keep track of two limits |
|
4460 |
assert_different_registers(str1, str2, cnt1, cnt2, tmp1, tmp2, result); |
|
4461 |
limit2 = tmp2; |
|
4462 |
} |
|
4463 |
||
4464 |
// Is the minimum length zero? |
|
4465 |
cmp(limit1, (int)0); // use cast to resolve overloading ambiguity |
|
4466 |
br(Assembler::equal, true, Assembler::pn, Ldone); |
|
4467 |
// result is difference in lengths |
|
4468 |
if (ae == StrIntrinsicNode::UU) { |
|
4469 |
delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars |
|
4470 |
} else { |
|
4471 |
delayed()->mov(diff, result); |
|
4472 |
} |
|
4473 |
||
4474 |
// Load first characters |
|
4475 |
if (ae == StrIntrinsicNode::LL) { |
|
4476 |
stride1 = stride2 = sizeof(jbyte); |
|
4477 |
ldub(str1, 0, chr1); |
|
4478 |
ldub(str2, 0, chr2); |
|
4479 |
} else if (ae == StrIntrinsicNode::UU) { |
|
4480 |
stride1 = stride2 = sizeof(jchar); |
|
4481 |
lduh(str1, 0, chr1); |
|
4482 |
lduh(str2, 0, chr2); |
|
4483 |
} else { |
|
4484 |
stride1 = sizeof(jbyte); |
|
4485 |
stride2 = sizeof(jchar); |
|
4486 |
ldub(str1, 0, chr1); |
|
4487 |
lduh(str2, 0, chr2); |
|
4488 |
} |
|
4489 |
||
4490 |
// Compare first characters |
|
4491 |
subcc(chr1, chr2, chr1); |
|
4492 |
br(Assembler::notZero, false, Assembler::pt, Ldone); |
|
4493 |
assert(chr1 == result, "result must be pre-placed"); |
|
4494 |
delayed()->nop(); |
|
4495 |
||
4496 |
// Check if the strings start at same location |
|
4497 |
cmp(str1, str2); |
|
4498 |
brx(Assembler::equal, true, Assembler::pn, Ldone); |
|
4499 |
delayed()->mov(G0, result); // result is zero |
|
4500 |
||
4501 |
// We have no guarantee that on 64 bit the higher half of limit is 0 |
|
4502 |
signx(limit1); |
|
4503 |
||
4504 |
// Get limit |
|
4505 |
if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { |
|
4506 |
sll(limit1, 1, limit2); |
|
4507 |
subcc(limit2, stride2, chr2); |
|
4508 |
} |
|
4509 |
subcc(limit1, stride1, chr1); |
|
4510 |
br(Assembler::zero, true, Assembler::pn, Ldone); |
|
4511 |
// result is difference in lengths |
|
4512 |
if (ae == StrIntrinsicNode::UU) { |
|
4513 |
delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars |
|
4514 |
} else { |
|
4515 |
delayed()->mov(diff, result); |
|
4516 |
} |
|
4517 |
||
4518 |
// Shift str1 and str2 to the end of the arrays, negate limit |
|
4519 |
add(str1, limit1, str1); |
|
4520 |
add(str2, limit2, str2); |
|
4521 |
neg(chr1, limit1); // limit1 = -(limit1-stride1) |
|
4522 |
if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { |
|
4523 |
neg(chr2, limit2); // limit2 = -(limit2-stride2) |
|
4524 |
} |
|
4525 |
||
4526 |
// Compare the rest of the characters |
|
4527 |
load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false); |
33628 | 4528 |
|
4529 |
bind(Lloop); |
|
4530 |
load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false); |
33628 | 4531 |
|
4532 |
subcc(chr1, chr2, chr1); |
|
4533 |
br(Assembler::notZero, false, Assembler::pt, Ldone); |
|
4534 |
assert(chr1 == result, "result must be pre-placed"); |
|
4535 |
delayed()->inccc(limit1, stride1); |
|
4536 |
if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { |
|
4537 |
inccc(limit2, stride2); |
|
4538 |
} |
|
4539 |
||
4540 |
// annul LDUB if branch is not taken to prevent access past end of string |
|
4541 |
br(Assembler::notZero, true, Assembler::pt, Lloop); |
|
4542 |
delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false); |
33628 | 4543 |
|
4544 |
// If strings are equal up to min length, return the length difference. |
|
4545 |
if (ae == StrIntrinsicNode::UU) { |
|
4546 |
// Divide by 2 to get number of chars |
|
4547 |
sra(diff, 1, result); |
|
4548 |
} else { |
|
4549 |
mov(diff, result); |
|
4550 |
} |
|
4551 |
||
4552 |
// Otherwise, return the difference between the first mismatched chars. |
|
4553 |
bind(Ldone); |
|
4554 |
if(ae == StrIntrinsicNode::UL) { |
|
4555 |
// Negate result (see note above) |
|
4556 |
neg(result); |
|
4557 |
} |
|
4558 |
} |
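
// [Illustrative sketch, not part of the original sources.] The stub mirrors
// String.compareTo(): compare up to the shorter length and fall back to the length
// difference (kept in 'diff' above) as the tie-breaker; in the UL case the operands
// were swapped, so the result is negated at Ldone. Reference C++ version for the UU
// case (hypothetical helper; assumes <stdint.h> and <cstddef>):
static inline int example_string_compare(const uint16_t* a, size_t alen,
                                         const uint16_t* b, size_t blen) {
  size_t min = (alen < blen) ? alen : blen;
  for (size_t i = 0; i < min; i++) {
    if (a[i] != b[i]) return (int)a[i] - (int)b[i];
  }
  return (int)alen - (int)blen;    // tie-breaker: difference of lengths
}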
|
4559 |
||
4560 |
void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, |
|
4561 |
Register limit, Register tmp, Register result, bool is_byte) { |
|
38142
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4562 |
Label Ldone, Lloop, Lremaining; |
33628 | 4563 |
assert_different_registers(ary1, ary2, limit, tmp, result); |
4564 |
||
4565 |
int length_offset = arrayOopDesc::length_offset_in_bytes(); |
|
4566 |
int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); |
|
38142
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4567 |
assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned"); |
33628 | 4568 |
|
4569 |
if (is_array_equ) { |
|
4570 |
// return true if the same array |
|
4571 |
cmp(ary1, ary2); |
|
4572 |
brx(Assembler::equal, true, Assembler::pn, Ldone); |
|
38142
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4573 |
delayed()->mov(1, result); // equal |
33628 | 4574 |
|
4575 |
br_null(ary1, true, Assembler::pn, Ldone); |
|
38142
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4576 |
delayed()->clr(result); // not equal |
33628 | 4577 |
|
4578 |
br_null(ary2, true, Assembler::pn, Ldone); |
|
38142
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4579 |
delayed()->clr(result); // not equal |
33628 | 4580 |
|
4581 |
// load the lengths of arrays |
|
4582 |
ld(Address(ary1, length_offset), limit); |
|
4583 |
ld(Address(ary2, length_offset), tmp); |
|
4584 |
||
4585 |
// return false if the two arrays are not equal length |
|
4586 |
cmp(limit, tmp); |
|
4587 |
br(Assembler::notEqual, true, Assembler::pn, Ldone); |
|
38142
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4588 |
delayed()->clr(result); // not equal |
33628 | 4589 |
} |
4590 |
||
4591 |
cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn); |
|
38142
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4592 |
delayed()->mov(1, result); // zero-length arrays are equal |
33628 | 4593 |
|
4594 |
if (is_array_equ) { |
|
4595 |
// load array addresses |
|
4596 |
add(ary1, base_offset, ary1); |
|
4597 |
add(ary2, base_offset, ary2); |
|
38142
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4598 |
// set byte count |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4599 |
if (!is_byte) { |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4600 |
sll(limit, exact_log2(sizeof(jchar)), limit); |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4601 |
} |
33628 | 4602 |
} else { |
4603 |
// We have no guarantee that on 64 bit the higher half of limit is 0 |
|
4604 |
signx(limit); |
|
4605 |
} |
|
4606 |
||
38142
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4607 |
#ifdef ASSERT |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4608 |
// Sanity check for doubleword (8-byte) alignment of ary1 and ary2. |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4609 |
// Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()). |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4610 |
Label Laligned; |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4611 |
or3(ary1, ary2, tmp); |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4612 |
andcc(tmp, 7, tmp); |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4613 |
br_null_short(tmp, Assembler::pn, Laligned); |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4614 |
STOP("First array element is not 8-byte aligned."); |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4615 |
should_not_reach_here(); |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4616 |
bind(Laligned); |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4617 |
#endif |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4618 |
|
14631 | 4619 |
// Shift ary1 and ary2 to the end of the arrays, negate limit |
4620 |
add(ary1, limit, ary1); |
|
4621 |
add(ary2, limit, ary2); |
|
4622 |
neg(limit, limit); |
|
4623 |
||
38142
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4624 |
// MAIN LOOP |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4625 |
// Load and compare array elements of size 'byte_width' until the elements are not |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4626 |
// equal or we reached the end of the arrays. If the size of the arrays is not a |
e16b23089599
6941938: Improve array equals intrinsic on SPARC
thartmann
parents:
38042
diff
changeset
|
4627 |
  // multiple of 'byte_width', we simply read over the end of the array, bail out and
  // compare the remaining bytes below by skipping the garbage bytes.
  ldx(ary1, limit, result);
  bind(Lloop);
  ldx(ary2, limit, tmp);
  inccc(limit, 8);
  // Bail out if we reached the end (but still do the comparison)
  br(Assembler::positive, false, Assembler::pn, Lremaining);
  delayed()->cmp(result, tmp);
  // Check equality of elements
  brx(Assembler::equal, false, Assembler::pt, target(Lloop));
  delayed()->ldx(ary1, limit, result);

  ba(Ldone);
  delayed()->clr(result); // not equal

  // TAIL COMPARISON
  // We got here because we reached the end of the arrays. 'limit' is the number of
  // garbage bytes we may have compared by reading over the end of the arrays. Shift
  // out the garbage and compare the remaining elements.
  bind(Lremaining);
  // Optimistic shortcut: elements potentially including garbage are equal
  brx(Assembler::equal, true, Assembler::pt, target(Ldone));
  delayed()->mov(1, result); // equal
  // Shift 'limit' bytes to the right and compare
  sll(limit, 3, limit); // bytes to bits
  srlx(result, limit, result);
  srlx(tmp, limit, tmp);
  cmp(result, tmp);
  clr(result);
  movcc(Assembler::equal, false, xcc, 1, result);

  bind(Ldone);
}
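
// Reference only (not emitted code): a C-style sketch of the tail handling above.
// The loop reads full 8-byte blocks and may overrun the logical end of the arrays;
// the overrun ("garbage") bytes are shifted out before the final compare. Names
// below are illustrative, not part of this file:
//
//   uint64_t a = last_block_a, b = last_block_b; // 8B loads that ran past the end
//   int garbage_bits = garbage_bytes * 8;
//   result = ((a >> garbage_bits) == (b >> garbage_bits)) ? 1 : 0;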
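
// A minimal scalar model of what has_negatives computes (reference only; the
// emitted code below works on aligned 8-byte blocks against a 0x80 byte mask):
//
//   bool has_negatives_ref(const unsigned char* s, int size) {
//     for (int i = 0; i < size; i++) {
//       if (s[i] & 0x80) return true;   // byte has its high bit set
//     }
//     return false;
//   }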
void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) {

  // test for negative bytes in input string of a given size
  // result 1 if found, 0 otherwise.

  Label Lcore, Ltail, Lreturn, Lcore_rpt;

  assert_different_registers(inp, size, t2, t3, t4, t5, result);

  Register i     = result;  // result used as integer index i until very end
  Register lmask = t2;      // t2 is aliased to lmask

  // INITIALIZATION
  // ===========================================================
  // initialize highbits mask -> lmask = 0x8080808080808080  (8B/64b)
  // compute unaligned offset -> i
  // compute core end index   -> t5
  Assembler::sethi(0x80808000, t2);   //! sethi macro fails to emit optimal
  add(t2, 0x80, t2);
  sllx(t2, 32, t3);
  or3(t3, t2, lmask);                 // 0x8080808080808080 -> lmask
  sra(size,0,size);
  andcc(inp, 0x7, i);                 // unaligned offset -> i
  br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned?
  delayed()->add(size, -8, t5);       // (annulled) core end index -> t5

  // ===========================================================

  // UNALIGNED HEAD
  // ===========================================================
  // * unaligned head handling: grab aligned 8B containing unaligned inp(ut)
  // * obliterate (ignore) bytes outside string by shifting off reg ends
  // * compare with bitmask, short circuit return true if one or more high
  //   bits set.
  cmp(size, 0);
  br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit?
  delayed()->mov(0, result);    // annulled so i not clobbered for following
  neg(i, t4);
  add(i, size, t5);
  ldx(inp, t4, t3);  // raw aligned 8B containing unaligned head -> t3
  mov(8, t4);
  sub(t4, t5, t4);
  sra(t4, 31, t5);
  andn(t4, t5, t5);
  add(i, t5, t4);
  sll(t5, 3, t5);
  sll(t4, 3, t4);    // # bits to shift right, left -> t5,t4
  srlx(t3, t5, t3);
  sllx(t3, t4, t3);  // bytes outside string in 8B header obliterated -> t3
  andcc(lmask, t3, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
  delayed()->mov(1, result);    // annulled so i not clobbered for following
  add(size, -8, t5); // core end index -> t5
  mov(8, t4);
  sub(t4, i, i);     // # bytes examined in unaligned head (<8) -> i
  // ===========================================================

  // ALIGNED CORE
  // ===========================================================
  // * iterate index i over aligned 8B sections of core, comparing with
  //   bitmask, short circuit return true if one or more high bits set
  // t5 contains core end index/loop limit which is the index
  //   of the MSB of last (unaligned) 8B fully contained in the string.
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Lcore);
  cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail);
  bind(Lcore_rpt);
  ldx(inp, i, t3);
  andcc(t3, lmask, G0);
  brx(Assembler::notZero, true, Assembler::pn, Lreturn);
  delayed()->mov(1, result);    // annulled so i not clobbered for following
  add(i, 8, i);
  cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt);
  // ===========================================================

  // ALIGNED TAIL (<8B)
  // ===========================================================
  // handle aligned tail of 7B or less as complete 8B, obliterating end of
  // string bytes by shifting them off end, compare what's left with bitmask
  // inp   contains address of first byte in string/array
  // lmask contains 8B high bit mask for comparison
  // i     contains next index to be processed (adr. inp+i is on 8B boundary)
  bind(Ltail);
  subcc(size, i, t4);   // # of remaining bytes in string -> t4
  // return 0 if no more remaining bytes
  br(Assembler::lessEqual, true, Assembler::pn, Lreturn);
  delayed()->mov(0, result);    // annulled so i not clobbered for following
  ldx(inp, i, t3);      // load final 8B (aligned) containing tail -> t3
  mov(8, t5);
  sub(t5, t4, t4);
  mov(0, result);       // ** i clobbered at this point
  sll(t4, 3, t4);       // bits beyond end of string        -> t4
  srlx(t3, t4, t3);     // bytes beyond end now obliterated -> t3
  andcc(lmask, t3, G0);
  movcc(Assembler::notZero, false, xcc, 1, result);
  bind(Lreturn);
}

#endif

// Use BIS for zeroing (count is in bytes). |
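// Rough shape of the strategy below (reference only; BIS = Block Initializing
// Store, which zeroes a whole cache line without first reading it):
//   1. fall back to a plain 8-byte store loop for small counts,
//   2. otherwise zero up to the next cache-line boundary with ordinary stores,
//   3. zero whole cache lines with BIS stores (stxa ... ASI_ST_BLKINIT_PRIMARY),
//   4. issue a StoreLoad membar (required after BIS), and
//   5. finish the remaining tail with ordinary 8-byte stores.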
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
  assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
  Register end = count;
  int cache_line_size = VM_Version::prefetch_data_size();
  // Minimum count when BIS zeroing can be used since
  // it needs membar which is expensive.
  int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);

  Label small_loop;
  // Check if count is negative (dead code) or zero.
  // Note: count uses 64 bits in the 64-bit VM.
  cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);

  // Use BIS zeroing only for big arrays since it requires membar.
  if (Assembler::is_simm13(block_zero_size)) { // < 4096
    cmp(count, block_zero_size);
  } else {
    set(block_zero_size, temp);
    cmp(count, temp);
  }
  br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
  delayed()->add(to, count, end);

  // Note: size is >= three (32 bytes) cache lines.

  // Clean the beginning of space up to next cache line.
  for (int offs = 0; offs < cache_line_size; offs += 8) {
    stx(G0, to, offs);
  }

  // align to next cache line
  add(to, cache_line_size, to);
  and3(to, -cache_line_size, to);

  // Note: size left >= two (32 bytes) cache lines.

  // BIS should not be used to zero tail (64 bytes)
  // to avoid zeroing a header of the following object.
  sub(end, (cache_line_size*2)-8, end);

  Label bis_loop;
  bind(bis_loop);
  stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  add(to, cache_line_size, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);

  // BIS needs membar.
  membar(Assembler::StoreLoad);

  add(end, (cache_line_size*2)-8, end); // restore end
  cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);

  // Clean the tail.
  bind(small_loop);
  stx(G0, to, 0);
  add(to, 8, to);
  cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
  nop(); // Separate short branches
}

/**
 * Update CRC-32[C] with a byte value according to constants in table
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]val       Register containing the byte to fold into the CRC.
 * @param [in]table     Register containing the table of crc constants.
 *
 * uint32_t crc;
 * val = crc_table[(val ^ crc) & 0xFF];
 * crc = val ^ (crc >> 8);
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  xor3(val, crc, val);
  and3(val, 0xFF, val);
  sllx(val, 2, val);
  lduw(table, val, val);
  srlx(crc, 8, crc);
  xor3(val, crc, crc);
}
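
// Note (added for clarity): the lookup above assumes a 256-entry table of 32-bit
// words; the byte index is scaled by 4 via sllx(val, 2, val) before the lduw.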
// Reverse byte order of lower 32 bits, assuming upper 32 bits all zeros |
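// Reference only: the sequence below is equivalent to byte-swapping the low
// 32 bits of 'src' into 'dst', e.g. in C (illustrative, not emitted code):
//   dst = ((src & 0xFF) << 24) | ((src & 0xFF00) << 8) |
//         ((src >> 8) & 0xFF00) | ((src >> 24) & 0xFF);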
void MacroAssembler::reverse_bytes_32(Register src, Register dst, Register tmp) {
  srlx(src, 24, dst);

  sllx(src, 32+8, tmp);
  srlx(tmp, 32+24, tmp);
  sllx(tmp, 8, tmp);
  or3(dst, tmp, dst);

  sllx(src, 32+16, tmp);
  srlx(tmp, 32+24, tmp);
  sllx(tmp, 16, tmp);
  or3(dst, tmp, dst);

  sllx(src, 32+24, tmp);
  srlx(tmp, 32, tmp);
  or3(dst, tmp, dst);
}

void MacroAssembler::movitof_revbytes(Register src, FloatRegister dst, Register tmp1, Register tmp2) {
  reverse_bytes_32(src, tmp1, tmp2);
  movxtod(tmp1, dst);
}

void MacroAssembler::movftoi_revbytes(FloatRegister src, Register dst, Register tmp1, Register tmp2) {
  movdtox(src, tmp1);
  reverse_bytes_32(tmp1, dst, tmp2);
}
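
// Added note: these two helpers byte-swap the low 32 bits while moving a value
// between an integer register and a double FP register (movxtod/movdtox); the
// CRC32C code below uses them to feed the FP-side crc32c instruction with the
// byte order it expects.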
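
// The two fold_128bit_crc32 overloads below fold a 128-bit value (kept as a
// hi/lo register pair) by carry-less multiplication (xmulx/xmulxhi) with a pair
// of precomputed constants, then XOR in either the next 16 bytes loaded from
// 'buf' or a second hi/lo pair. This is the usual CRC folding step; kernel_crc32
// drives it over the input buffer.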
void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register buf, int offset) {
  xmulx(xcrc_hi, xK_hi, xtmp_lo);
  xmulxhi(xcrc_hi, xK_hi, xtmp_hi);
  xmulxhi(xcrc_lo, xK_lo, xcrc_hi);
  xmulx(xcrc_lo, xK_lo, xcrc_lo);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
  ldxl(buf, G0, xtmp_lo);
  inc(buf, 8);
  ldxl(buf, G0, xtmp_hi);
  inc(buf, 8);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
}

void MacroAssembler::fold_128bit_crc32(Register xcrc_hi, Register xcrc_lo, Register xK_hi, Register xK_lo, Register xtmp_hi, Register xtmp_lo, Register xbuf_hi, Register xbuf_lo) {
  mov(xcrc_lo, xtmp_lo);
  mov(xcrc_hi, xtmp_hi);
  xmulx(xtmp_hi, xK_hi, xtmp_lo);
  xmulxhi(xtmp_hi, xK_hi, xtmp_hi);
  xmulxhi(xcrc_lo, xK_lo, xcrc_hi);
  xmulx(xcrc_lo, xK_lo, xcrc_lo);
  xor3(xcrc_lo, xbuf_lo, xcrc_lo);
  xor3(xcrc_hi, xbuf_hi, xcrc_hi);
  xor3(xcrc_lo, xtmp_lo, xcrc_lo);
  xor3(xcrc_hi, xtmp_hi, xcrc_hi);
}

void MacroAssembler::fold_8bit_crc32(Register xcrc, Register table, Register xtmp, Register tmp) {
  and3(xcrc, 0xFF, tmp);
  sllx(tmp, 2, tmp);
  lduw(table, tmp, xtmp);
  srlx(xcrc, 8, xcrc);
  xor3(xtmp, xcrc, xcrc);
}

void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
  and3(crc, 0xFF, tmp);
  srlx(crc, 8, crc);
  sllx(tmp, 2, tmp);
  lduw(table, tmp, tmp);
  xor3(tmp, crc, crc);
}
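
// Added note: the two fold_8bit_crc32 overloads each perform one table-driven
// byte step of the CRC (shift right by 8 and XOR in a table entry selected by
// the low byte); the first works on a 64-bit folded value, the second on the
// 32-bit crc register itself.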

#define CRC32_TMP_REG_NUM 18

#define CRC32_CONST_64  0x163cd6124
#define CRC32_CONST_96  0x0ccaa009e
#define CRC32_CONST_160 0x1751997d0
#define CRC32_CONST_480 0x1c6e41596
#define CRC32_CONST_544 0x154442bd4

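// Overall flow of kernel_crc32 (summary of the code below): the crc is bit-inverted
// on entry and again on exit (not1), short inputs and the unaligned head are handled
// with the byte-at-a-time table method, and for larger aligned inputs the integer
// temporaries are parked in FP registers so that 512 bits (4 x 128-bit streams) can
// be folded per iteration with fold_128bit_crc32, then reduced back to 128 bits,
// folded to 64 bits with CRC32_CONST_64, and finished with 8-bit table folds.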
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table) {

  Label L_cleanup_loop, L_cleanup_check, L_align_loop, L_align_check;
  Label L_main_loop_prologue;
  Label L_fold_512b, L_fold_512b_loop, L_fold_128b;
  Label L_fold_tail, L_fold_tail_loop;
  Label L_8byte_fold_loop, L_8byte_fold_check;

  const Register tmp[CRC32_TMP_REG_NUM] = {L0, L1, L2, L3, L4, L5, L6, G1, I0, I1, I2, I3, I4, I5, I7, O4, O5, G3};

  Register const_64  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_96  = tmp[CRC32_TMP_REG_NUM-1];
  Register const_160 = tmp[CRC32_TMP_REG_NUM-2];
  Register const_480 = tmp[CRC32_TMP_REG_NUM-1];
  Register const_544 = tmp[CRC32_TMP_REG_NUM-2];

  set(ExternalAddress(StubRoutines::crc_table_addr()), table);

  not1(crc); // ~c
  clruwu(crc); // clear upper 32 bits of crc

  // Check if below cutoff, proceed directly to cleanup code
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);

  // Align buffer to 8 byte boundary
  mov(8, O5);
  and3(buf, 0x7, O4);
  sub(O5, O4, O5);
  and3(O5, 0x7, O5);
  sub(len, O5, len);
  ba(L_align_check);
  delayed()->nop();

  // Alignment loop, table look up method for up to 7 bytes
  bind(L_align_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(O5);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_align_check);
  nop();
  cmp_and_br_short(O5, 0, Assembler::notEqual, Assembler::pt, L_align_loop);

  // Aligned on 64-bit (8-byte) boundary at this point
  // Check if still above cutoff (31 bytes)
  mov(31, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_cleanup_check);
  // At least 32 bytes left to process

  // Free up registers by storing them to FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movxtod(tmp[i], as_FloatRegister(2*i));
  }

  // Determine which loop to enter
  // Shared prologue
  ldxl(buf, G0, tmp[0]);
  inc(buf, 8);
  ldxl(buf, G0, tmp[1]);
  inc(buf, 8);
  xor3(tmp[0], crc, tmp[0]); // Fold CRC into first few bytes
  and3(crc, 0, crc); // Clear out the crc register
  // Main loop needs at least 128 bytes
  mov(128, G4);
  mov(64, tmp[2]);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_main_loop_prologue);
  // Less than 64 bytes
  nop();
  cmp_and_br_short(len, tmp[2], Assembler::lessUnsigned, Assembler::pt, L_fold_tail);
  // Between 64 and 127 bytes
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[4], tmp[5], buf, 16);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[6], tmp[7], buf, 32);
  dec(len, 48);
  ba(L_fold_tail);
  delayed()->nop();

  bind(L_main_loop_prologue);
  for (int i = 2; i < 8; i++) {
    ldxl(buf, G0, tmp[i]);
    inc(buf, 8);
  }

  // Fold total 512 bits of polynomial on each iteration,
  // 128 bits per each of 4 parallel streams.
  set64(CRC32_CONST_480, const_480, tmp[8]);
  set64(CRC32_CONST_544, const_544, tmp[9]);

  mov(128, G4);
  bind(L_fold_512b_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_480, const_544, tmp[9],  tmp[8],  buf,  0);
  fold_128bit_crc32(tmp[3], tmp[2], const_480, const_544, tmp[11], tmp[10], buf, 16);
  fold_128bit_crc32(tmp[5], tmp[4], const_480, const_544, tmp[13], tmp[12], buf, 32);
  fold_128bit_crc32(tmp[7], tmp[6], const_480, const_544, tmp[15], tmp[14], buf, 64);
  dec(len, 64);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_512b_loop);

  // Fold 512 bits to 128 bits
  bind(L_fold_512b);
  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[3], tmp[2]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[5], tmp[4]);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[8], tmp[9], tmp[7], tmp[6]);
  dec(len, 48);

  // Fold the remaining 128-bit data chunks
  bind(L_fold_tail);
  mov(32, G4);
  cmp_and_br_short(len, G4, Assembler::lessEqualUnsigned, Assembler::pt, L_fold_128b);

  set64(CRC32_CONST_96, const_96, tmp[8]);
  set64(CRC32_CONST_160, const_160, tmp[9]);

  bind(L_fold_tail_loop);
  fold_128bit_crc32(tmp[1], tmp[0], const_96, const_160, tmp[2], tmp[3], buf, 0);
  sub(len, 16, len);
  cmp_and_br_short(len, G4, Assembler::greaterEqualUnsigned, Assembler::pt, L_fold_tail_loop);

  // Fold the 128 bits in tmps 0 - 1 into tmp 1
  bind(L_fold_128b);

  set64(CRC32_CONST_64, const_64, tmp[4]);

  xmulx(const_64, tmp[0], tmp[2]);
  xmulxhi(const_64, tmp[0], tmp[3]);

  srl(tmp[2], G0, tmp[4]);
  xmulx(const_64, tmp[4], tmp[4]);

  srlx(tmp[2], 32, tmp[2]);
  sllx(tmp[3], 32, tmp[3]);
  or3(tmp[2], tmp[3], tmp[2]);

  xor3(tmp[4], tmp[1], tmp[4]);
  xor3(tmp[4], tmp[2], tmp[1]);
  dec(len, 8);

  // Use table lookup for the 8 bytes left in tmp[1]
  dec(len, 8);

  // 8 8-bit folds to compute 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(tmp[1], table, tmp[2], tmp[3]);
  }
  srl(tmp[1], G0, crc); // move 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, tmp[3]);
  }

  bind(L_8byte_fold_check);

  // Restore int registers saved in FP registers
  for (int i = 0; i < CRC32_TMP_REG_NUM; i++) {
    movdtox(as_FloatRegister(2*i), tmp[i]);
  }

  ba(L_cleanup_check);
  delayed()->nop();

  // Table look-up method for the remaining few bytes
  bind(L_cleanup_loop);
  ldub(buf, 0, O4);
  inc(buf);
  dec(len);
  xor3(O4, crc, O4);
  and3(O4, 0xFF, O4);
  sllx(O4, 2, O4);
  lduw(table, O4, O4);
  srlx(crc, 8, crc);
  xor3(O4, crc, crc);
  bind(L_cleanup_check);
  nop();
  cmp_and_br_short(len, 0, Assembler::greaterUnsigned, Assembler::pt, L_cleanup_loop);

  not1(crc);
}

#define CHUNK_LEN   128          /* 128 x 8B = 1KB */
#define CHUNK_K1    0x1307a0206  /* reverseBits(pow(x, CHUNK_LEN*8*8*3 - 32) mod P(x)) << 1 */
#define CHUNK_K2    0x1a0f717c4  /* reverseBits(pow(x, CHUNK_LEN*8*8*2 - 32) mod P(x)) << 1 */
#define CHUNK_K3    0x0170076fa  /* reverseBits(pow(x, CHUNK_LEN*8*8*1 - 32) mod P(x)) << 1 */

void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, Register table) { |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5120 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5121 |
Label L_crc32c_head, L_crc32c_aligned; |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5122 |
Label L_crc32c_parallel, L_crc32c_parallel_loop; |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5123 |
Label L_crc32c_serial, L_crc32c_x32_loop, L_crc32c_x8, L_crc32c_x8_loop; |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5124 |
Label L_crc32c_done, L_crc32c_tail, L_crc32c_return; |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5125 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5126 |
set(ExternalAddress(StubRoutines::crc32c_table_addr()), table); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5127 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5128 |
cmp_and_br_short(len, 0, Assembler::lessEqual, Assembler::pn, L_crc32c_return); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5129 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5130 |
// clear upper 32 bits of crc |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5131 |
clruwu(crc); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5132 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5133 |
and3(buf, 7, G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5134 |
cmp_and_brx_short(G4, 0, Assembler::equal, Assembler::pt, L_crc32c_aligned); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5135 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5136 |
mov(8, G1); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5137 |
sub(G1, G4, G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5138 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5139 |
// ------ process the misaligned head (7 bytes or less) ------ |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5140 |
bind(L_crc32c_head); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5141 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5142 |
// crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF]; |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5143 |
ldub(buf, 0, G1); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5144 |
update_byte_crc32(crc, G1, table); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5145 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5146 |
inc(buf); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5147 |
dec(len); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5148 |
cmp_and_br_short(len, 0, Assembler::equal, Assembler::pn, L_crc32c_return); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5149 |
dec(G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5150 |
cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_head); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5151 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5152 |
// ------ process the 8-byte-aligned body ------ |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5153 |
bind(L_crc32c_aligned); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5154 |
nop(); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5155 |
cmp_and_br_short(len, 8, Assembler::less, Assembler::pn, L_crc32c_tail); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5156 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5157 |
// reverse the byte order of lower 32 bits to big endian, and move to FP side |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5158 |
movitof_revbytes(crc, F0, G1, G3); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5159 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5160 |
set(CHUNK_LEN*8*4, G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5161 |
cmp_and_br_short(len, G4, Assembler::less, Assembler::pt, L_crc32c_serial); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5162 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5163 |
// ------ process four 1KB chunks in parallel ------ |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5164 |
bind(L_crc32c_parallel); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5165 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5166 |
fzero(FloatRegisterImpl::D, F2); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5167 |
fzero(FloatRegisterImpl::D, F4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5168 |
fzero(FloatRegisterImpl::D, F6); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5169 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5170 |
mov(CHUNK_LEN - 1, G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5171 |
bind(L_crc32c_parallel_loop); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5172 |
// schedule ldf's ahead of crc32c's to hide the load-use latency |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5173 |
ldf(FloatRegisterImpl::D, buf, 0, F8); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5174 |
ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8, F10); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5175 |
ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5176 |
ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*24, F14); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5177 |
crc32c(F0, F8, F0); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5178 |
crc32c(F2, F10, F2); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5179 |
crc32c(F4, F12, F4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5180 |
crc32c(F6, F14, F6); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5181 |
inc(buf, 8); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5182 |
dec(G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5183 |
cmp_and_br_short(G4, 0, Assembler::greater, Assembler::pt, L_crc32c_parallel_loop); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5184 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5185 |
ldf(FloatRegisterImpl::D, buf, 0, F8); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5186 |
ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*8, F10); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5187 |
ldf(FloatRegisterImpl::D, buf, CHUNK_LEN*16, F12); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5188 |
crc32c(F0, F8, F0); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5189 |
crc32c(F2, F10, F2); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5190 |
crc32c(F4, F12, F4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5191 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5192 |
inc(buf, CHUNK_LEN*24); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5193 |
ldfl(FloatRegisterImpl::D, buf, G0, F14); // load in little endian |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5194 |
inc(buf, 8); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5195 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5196 |
prefetch(buf, 0, Assembler::severalReads); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5197 |
prefetch(buf, CHUNK_LEN*8, Assembler::severalReads); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5198 |
prefetch(buf, CHUNK_LEN*16, Assembler::severalReads); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5199 |
prefetch(buf, CHUNK_LEN*24, Assembler::severalReads); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5200 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5201 |
// move to INT side, and reverse the byte order of lower 32 bits to little endian |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5202 |
movftoi_revbytes(F0, O4, G1, G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5203 |
movftoi_revbytes(F2, O5, G1, G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5204 |
movftoi_revbytes(F4, G5, G1, G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5205 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5206 |
// combine the results of 4 chunks |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5207 |
set64(CHUNK_K1, G3, G1); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5208 |
xmulx(O4, G3, O4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5209 |
set64(CHUNK_K2, G3, G1); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5210 |
xmulx(O5, G3, O5); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5211 |
set64(CHUNK_K3, G3, G1); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5212 |
xmulx(G5, G3, G5); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5213 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5214 |
movdtox(F14, G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5215 |
xor3(O4, O5, O5); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5216 |
xor3(G5, O5, O5); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5217 |
xor3(G4, O5, O5); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5218 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5219 |
// reverse the byte order to big endian, via stack, and move to FP side |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5220 |
// TODO: use new revb instruction |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5221 |
add(SP, -8, G1); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5222 |
srlx(G1, 3, G1); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5223 |
sllx(G1, 3, G1); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5224 |
stx(O5, G1, G0); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5225 |
ldfl(FloatRegisterImpl::D, G1, G0, F2); // load in little endian |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5226 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5227 |
crc32c(F6, F2, F0); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5228 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5229 |
set(CHUNK_LEN*8*4, G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5230 |
sub(len, G4, len); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5231 |
cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_parallel); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5232 |
nop(); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5233 |
cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_done); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5234 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5235 |
bind(L_crc32c_serial); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5236 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5237 |
mov(32, G4); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5238 |
cmp_and_br_short(len, G4, Assembler::less, Assembler::pn, L_crc32c_x8); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5239 |
|
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5240 |
// ------ process 32B chunks ------ |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5241 |
bind(L_crc32c_x32_loop); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5242 |
ldf(FloatRegisterImpl::D, buf, 0, F2); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5243 |
crc32c(F0, F2, F0); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5244 |
ldf(FloatRegisterImpl::D, buf, 8, F2); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5245 |
crc32c(F0, F2, F0); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5246 |
ldf(FloatRegisterImpl::D, buf, 16, F2); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5247 |
crc32c(F0, F2, F0); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5248 |
ldf(FloatRegisterImpl::D, buf, 24, F2); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5249 |
inc(buf, 32); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5250 |
crc32c(F0, F2, F0); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5251 |
dec(len, 32); |
d972e3a2df53
8155162: java.util.zip.CRC32C Interpreter/C1 intrinsics support on SPARC
kvn
parents:
38142
diff
changeset
|
5252 |
cmp_and_br_short(len, G4, Assembler::greaterEqual, Assembler::pt, L_crc32c_x32_loop); |
5253 |
5254 |   bind(L_crc32c_x8);
5255 |   nop();
5256 |   cmp_and_br_short(len, 8, Assembler::less, Assembler::pt, L_crc32c_done);
5257 |
5258 |   // ------ process 8B chunks ------
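  // Same pattern as the 32-byte loop above, one doubleword per iteration,
  // for as long as at least 8 bytes remain.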
5259 |   bind(L_crc32c_x8_loop);
5260 |   ldf(FloatRegisterImpl::D, buf, 0, F2);
5261 |   inc(buf, 8);
5262 |   crc32c(F0, F2, F0);
5263 |   dec(len, 8);
5264 |   cmp_and_br_short(len, 8, Assembler::greaterEqual, Assembler::pt, L_crc32c_x8_loop);
5265 |
5266 |   bind(L_crc32c_done);
5267 |
5268 |   // move to INT side, and reverse the byte order of lower 32 bits to little endian
5269 |   movftoi_revbytes(F0, crc, G1, G3);
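  // The hardware loops keep the CRC in F0; the table-driven tail below needs it
  // in the integer register 'crc' with the low 32 bits byte-swapped (G1 and G3
  // are used as temporaries by the helper).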
5270 |
5271 |   cmp_and_br_short(len, 0, Assembler::equal, Assembler::pt, L_crc32c_return);
5272 |
5273 |   // ------ process the misaligned tail (7 bytes or less) ------
5274 |   bind(L_crc32c_tail);
5275 |
5276 |   // crc = (crc >>> 8) ^ byteTable[(crc ^ b) & 0xFF];
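  // The remaining 1 to 7 bytes are consumed one at a time via update_byte_crc32
  // and the lookup table in 'table', following the formula above.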
5277 |   ldub(buf, 0, G1);
5278 |   update_byte_crc32(crc, G1, table);
5279 |
5280 |   inc(buf);
5281 |   dec(len);
5282 |   cmp_and_br_short(len, 0, Assembler::greater, Assembler::pt, L_crc32c_tail);
5283 |
5284 |   bind(L_crc32c_return);
5285 |   nop();
5286 | }