/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"

inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }


inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}
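
// Patches the displacement field of the branch instruction at 'branch' so
// that it transfers to 'target'. This is the platform-dependent hook used
// when a Label is bound after branches referring to it have been emitted.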
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  ld( s1, simm13a, d);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  st( d, s1, simm13a);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  stx(d, a, offset);
#else
  st( d, a, offset);
#endif
}
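
// Example (illustrative only; the register names and offset are invented):
//
//   ld_ptr(Roop, 8, Rtmp);   // pointer-sized load:  ldx on LP64, ld on 32-bit
//   st_ptr(Rtmp, Roop, 8);   // pointer-sized store: stx on LP64, st on 32-bit
//
// Callers manipulate pointer-sized fields without repeating #ifdef _LP64.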

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  stx(d, a, offset);
#else
  std(d, a, offset);
#endif
}
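
// Note: on 32-bit SPARC the ldd/std forms above move the 64-bit value through
// an even/odd register pair, so 'd' must name an even-numbered register there;
// on LP64 a single ldx/stx suffices.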

// Functions for isolating 64 bit shifts for LP64

inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register())  sll_ptr(s1, s2.as_register(), d);
  else                   sll_ptr(s1, s2.as_constant(), d);
}
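
// The immediate is named imm6a because the 64-bit sllx/srlx forms take a
// 6-bit shift count (0..63), while the 32-bit sll/srl forms use only 5 bits.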

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp(c, a, icc, p, d, rt);
  else
    Assembler::br(c, a, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  br(c, a, p, target(L));
}


// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
  Assembler::bp(c, a, xcc, p, d, rt);
#else
  MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  brx(c, a, p, target(L));
}

inline void MacroAssembler::ba( Label& L ) {
  br(always, false, pt, L);
}
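
// Example (illustrative; the registers and label are invented):
//
//   cmp(Rsub, Rsuper);             // pointer compare
//   brx(equal, false, pt, hit);    // tests xcc on LP64, icc on 32-bit
//   delayed()->nop();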

// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    fbp(c, a, fcc0, p, d, rt);
  else
    Assembler::fb(c, a, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

inline bool MacroAssembler::is_far_target(address d) {
  if (ForceUnreachable) {
    // References outside the code cache should be treated as far
    return d < CodeCache::low_bound() || d > CodeCache::high_bound();
  }
  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}
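
// The call instruction's 30-bit word displacement reaches +/-2GB. The target
// is checked against both ends of the code cache because the code being
// assembled may end up anywhere within it.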

// Call with a check to see if we need to deal with the added
// expense of relocation and whether we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
#ifdef _LP64
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
  // pass asserts here and in wdisp.
  if ( d == NULL )
    d = pc();

  // Is this address within range of the call instruction?
  // If not, use the expensive instruction sequence
  if (is_far_target(d)) {
    relocate(rt);
    AddressLiteral dest(d);
    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call(d, rt);
  }
#else
  Assembler::call( d, rt );
#endif
}

inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
  MacroAssembler::call( target(L), rt);
}


inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
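
// A branch-never with the annul bit set ("bn,a,pt") is the V9 idiom for an
// instruction prefetch of the target; pre-V9 parts simply get no prefetch.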


// clobbers o7 on V8!!
// returns delta from gotten pc to addr after
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  if (VM_Version::v9_instructions_work())
    rdpc(d);
  else {
    Label lbl;
    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
    if (d == O7)  delayed()->nop();
    else          delayed()->mov(O7, d);
    bind(lbl);
  }
  return offset() - x;
}
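
// On V8 the call to the very next instruction deposits the current PC in O7,
// and the delay-slot mov copies it to 'd'; that is why O7 is clobbered when
// rdpc is unavailable.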


// Note: All MacroAssembler::set_foo functions are defined out-of-line.


// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}


inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ldub(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld_ptr(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st(s, temp, addrlit.low10() + offset);
}


inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st_ptr(s, temp, addrlit.low10() + offset);
}
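
// These all follow the same sethi/low10 pattern: sethi materializes the upper
// 22 bits of the literal's address and the remaining 10 bits are folded into
// the memory instruction's immediate. ForceUnreachable selects the fixed-length
// patchable form so the address can be rewritten later.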


// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}


inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}


inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  // The sethi for 'a' is the caller's responsibility here.
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}


inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
  set_metadata(allocate_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
  set_metadata(constant_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  set(obj_addr, d);
}

inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}


inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}


inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}
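
// Both flavors bottom out in set() with an oop/metadata relocation; the
// allocate_* helpers enter the object under a fresh index in the code's
// recorder, while the constant_* helpers look up an existing entry.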


inline void MacroAssembler::load_argument( Argument& a, Register d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(), d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());    // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}


#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, F1, F3, F5 are used to pass floats instead of O0, O1, O2.
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry;
    // the low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}

inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // In the V9 ABI, D0, D2, D4 are used to pass doubles instead of O0, O1, O2.
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}
#endif

inline void MacroAssembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype) {
  relocate(rtype);
  add(s1, simm13a, d);
}
inline void MacroAssembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec) {
  relocate(rspec);
  add(s1, simm13a, d);
}

// form effective addresses this way:
inline void MacroAssembler::add(const Address& a, Register d, int offset) {
  if (a.has_index())   add(a.base(), a.index(),         d);
  else               { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
  if (offset != 0)     add(d,        offset,            d);
}
inline void MacroAssembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  add(s1, s2.as_register(),          d);
  else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       add(d,  offset,                    d);
}
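
// e.g. (illustrative) add(Address(Rbase, 24), Rdst) leaves Rbase + 24 in
// Rdst, attaching the Address's relocation if it carries one.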

inline void MacroAssembler::andn(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_register())  andn(s1, s2.as_register(), d);
  else                   andn(s1, s2.as_constant(), d);
}

inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }
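
// The clr* forms store G0, SPARC's hardwired zero register, so clearing
// memory costs a single store and no register to materialize the zero.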

#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif
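
// e.g. loading the jint -1 via ldsw leaves 0xFFFFFFFFFFFFFFFF in the 64-bit
// register, so subsequent full-register arithmetic and compares see the
// correct negative value.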

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif

inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(), d); }
  else               {                          ld( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(), d); }
  else               {                          ldsb(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(), d); }
  else               {                          ldsh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(), d); }
  else               {                          ldsw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(), d); }
  else               {                          ldub(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(), d); }
  else               {                          lduh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(), d); }
  else               {                          lduw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(), d); }
  else               {                          ldd( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(), d); }
  else               {                          ldx( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void MacroAssembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void MacroAssembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void MacroAssembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register())  ldf(w, s1, s2.as_register(), d);
  else                   ldf(w, s1, s2.as_constant(), d);
}

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
  relocate(a.rspec(offset));
  ldf(w, a.base(), a.disp() + offset, d);
}

// Returns whether membar emits any code; this must mirror membar() below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if (!os::is_MP())  return false;  // Not needed on single CPU
  if (VM_Version::v9_instructions_work()) {
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    return (effective_mask != 0);
  } else {
    return true;
  }
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP())  return;
  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  if (VM_Version::v9_instructions_work()) {
    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
    // of the mmask subfield of const7a that does anything that isn't done
    // implicitly is StoreLoad.
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    if (effective_mask != 0) {
      Assembler::membar(effective_mask);
    }
  } else {
    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
    // which guarantees that all stores behave as if an stbar were issued just after
    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
    // it can't be specified by stbar, nor have I come up with a way to simulate it.
    //
    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
    // space.  Put one here to be on the safe side.
    Assembler::ldstub(SP, 0, G0);
  }
}
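
// Typical use (illustrative): the fence required after a volatile store
// under TSO is
//
//   membar(Assembler::StoreLoad);
//
// StoreLoad being the only mmask bit that survives the filtering above.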

inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
  relocate(a.rspec(offset));
  assert(!a.has_index(), "");
  prefetch(a.base(), a.disp() + offset, f);
}

inline void MacroAssembler::st(Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void MacroAssembler::st(Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void MacroAssembler::st(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()        ); }
  else               {                          st( d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index()        ); }
  else               {                          stb(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index()        ); }
  else               {                          sth(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index()        ); }
  else               {                          stw(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index()        ); }
  else               {                          std(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index()        ); }
  else               {                          stx(d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void MacroAssembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void MacroAssembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void MacroAssembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void MacroAssembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void MacroAssembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register())  stf(w, d, s1, s2.as_register());
  else                   stf(w, d, s1, s2.as_constant());
}

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index()        ); }
  else               {                          stf(w, d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  sub(s1, s2.as_register(),          d);
  else                 { sub(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       sub(d,  offset,                    d);
}

inline void MacroAssembler::swap(Address& a, Register d, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d        ); }
  else               {                          swap(a.base(), a.disp() + offset, d); }
}
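
// swap is the SPARC atomic exchange: the register and the addressed word
// trade contents in one indivisible operation.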

#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP