/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below        = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear   = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal        = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual     = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf, */

};
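// Indexing reverse[] with an Assembler::Condition value yields its logical
// negation, e.g. reverse[Assembler::zero] == Assembler::notZero, so a branch
// or conditional move can have its sense flipped with a simple table lookup.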


// Implementation of MacroAssembler

// First all the versions that have distinct versions depending on 32/64 bit
// Unless the difference is trivial (1 line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr) {
  return Address::make_array(adr);
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  bool need_tmp_reg = false;
  if (tmp_reg == noreg) {
    need_tmp_reg = true;
    tmp_reg = lock_reg;
  } else {
    assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  }
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
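  // (cf. markOop.hpp: when biased, the mark word is laid out roughly as
  //  [ JavaThread* | epoch:2 | age:4 | biased_lock:1 | lock:2 ],
  //  i.e. age sits directly above the lock bits, which is what this
  //  assert is guarding.)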
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    movl(swap_reg, mark_addr);
  }
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  movl(tmp_reg, swap_reg);
  andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  jcc(Assembler::notEqual, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  // Note that because there is no current thread register on x86 we
  // need to store off the mark word we read out of the object to
  // avoid reloading it and needing to recheck invariants below. This
  // store is unfortunate but it makes the overall code shorter and
  // simpler.
  movl(saved_mark_addr, swap_reg);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  xorl(swap_reg, tmp_reg);
  if (swap_reg_contains_mark) {
    null_check_offset = offset();
  }
  movl(tmp_reg, klass_addr);
  xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset()));
  andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->biased_lock_entry_count_addr()));
  }
  jcc(Assembler::equal, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
  jcc(Assembler::notZero, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  testl(swap_reg, markOopDesc::epoch_mask_in_place);
  jcc(Assembler::notZero, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  movl(swap_reg, saved_mark_addr);
  andl(swap_reg,
       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  orl(tmp_reg, swap_reg);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  movl(swap_reg, klass_addr);
  orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset()));
  movl(swap_reg, saved_mark_addr);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  movl(swap_reg, saved_mark_addr);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  movl(tmp_reg, klass_addr);
  movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
  }

  bind(cas_label);

  return null_check_offset;
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Register src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}
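// (Both paths replicate lo's sign bit through hi, e.g. lo == -5 leaves
// hi == 0xFFFFFFFF and lo == 7 leaves hi == 0; per the AP-526 reference
// above this produces the rdx:rax pair a 64-bit divide idiom expects.)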

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry) {
  jmp(as_Address(entry));
}

// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}
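// (The result in x_hi follows Java lcmp: -1 if x < y, 0 if equal, +1 if
// x > y. Note the high words compare signed while the low words compare
// unsigned: e.g. for x = -1 (0xFFFFFFFF:0xFFFFFFFF) and y = 0, the signed
// high-word compare already routes to "low" and returns -1.)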

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
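  // (Worked example: x = 2^32 + 3, y = 2^32 + 5. Then x*y mod 2^64 =
  //  (3 + 5) * 2^32 + 15, i.e. hi(result) = 0 + 5 + 3 = 8 and
  //  lo(result) = 15; the x_hi * y_hi cross term has overflowed out of
  //  the low 64 bits entirely, which is why it never appears below.)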
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                // rbx, = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);  // if rbx, = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                   // x_hi * y_lo
  movl(rbx, rax);               // save lo(x_hi * y_lo) in rbx,
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                    // x_lo * y_hi
  addl(rbx, rax);               // add lo(x_lo * y_hi) to rbx,
  // 3rd step
  bind(quick);                  // note: rbx, = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                   // x_lo * y_lo
  addl(rdx, rbx);               // correct hi(x_lo * y_lo)
}

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;       // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                   // if (s < n)
  jcc(Assembler::less, L);      // else (s >= n)
  movl(hi, lo);                 // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                      // s (mod n) < n
  shldl(hi, lo);                // x := x << s
  shll(lo);
}
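// (Example: s = 40. The s >= 32 path moves lo into hi and zeroes lo; the
// final shld/shl then shift by cl mod 32 = 8, so hi = lo_orig << 8 and
// lo = 0, which is x << 40 for the 64-bit pair.)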


void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;       // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                   // if (s < n)
  jcc(Assembler::less, L);      // else (s >= n)
  movl(lo, hi);                 // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                      // s (mod n) < n
  shrdl(lo, hi);                // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movl(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src) {
  movl(dst, src);
}


void MacroAssembler::pop_callee_saved_registers() {
  pop(rcx);
  pop(rdx);
  pop(rdi);
  pop(rsi);
}

void MacroAssembler::pop_fTOS() {
  fld_d(Address(rsp, 0));
  addl(rsp, 2 * wordSize);
}

void MacroAssembler::push_callee_saved_registers() {
  push(rsi);
  push(rdi);
  push(rdx);
  push(rcx);
}

void MacroAssembler::push_fTOS() {
  subl(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}


void MacroAssembler::pushoop(jobject obj) {
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj) {
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src) {
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

void MacroAssembler::set_word_if_not_zero(Register dst) {
  xorl(dst, dst);
  set_byte_if_not_zero(dst);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  // Don't assert holding the ttyLock
  assert(false, err_msg("DEBUG MESSAGE: %s", msg));
  ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near top of stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  ExternalAddress message((address)msg);
  // push address of message
  pushptr(message.addr());
  { Label L; call(L, relocInfo::none); bind(L); }  // push eip
  pusha();                                         // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  ExternalAddress message((address) msg);
  // push address of message
  pushptr(message.addr());

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);  // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }  // push eip
  pusha();                                         // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
}

Address MacroAssembler::as_Address(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  lea(rscratch1, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch1, index._index, index._scale, index._disp);
  return array;
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
  assert(tmp_reg != noreg, "tmp_reg must be supplied");
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    movq(swap_reg, mark_addr);
  }
  movq(tmp_reg, swap_reg);
  andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::notEqual, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  load_prototype_header(tmp_reg, obj_reg);
  orq(tmp_reg, r15_thread);
  xorq(tmp_reg, swap_reg);
  andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
  }
  jcc(Assembler::equal, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  jcc(Assembler::notZero, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  testq(tmp_reg, markOopDesc::epoch_mask_in_place);
  jcc(Assembler::notZero, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  andq(swap_reg,
       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
  movq(tmp_reg, swap_reg);
  orq(tmp_reg, r15_thread);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgq(tmp_reg, Address(obj_reg, 0));
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  load_prototype_header(tmp_reg, obj_reg);
  orq(tmp_reg, r15_thread);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgq(tmp_reg, Address(obj_reg, 0));
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  load_prototype_header(tmp_reg, obj_reg);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgq(tmp_reg, Address(obj_reg, 0));
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
  }

  bind(cas_label);

  return null_check_offset;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  {
    call(RuntimeAddress(entry_point));
  }
  addq(rsp, 8);
  jmp(E);

  bind(L);
  {
    call(RuntimeAddress(entry_point));
  }

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
}

void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "should use cmpptr");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long));
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
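// (The special case exists because a hardware idivq of min_long by -1
// raises #DE: the true quotient 2^63 is not representable in 64 bits.
// Java instead defines min_long / -1 == min_long with remainder 0, which
// is exactly what the early-out above produces.)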

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}
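// (min_jint is tested first in these inc/dec helpers because -min_jint
// overflows int, so the usual "negative value" redirection to the twin
// routine cannot be applied to it.)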

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry) {
  lea(rscratch1, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch1;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
  movptr(dst, rscratch1);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(rscratch1, src);
      movq(dst, Address(rscratch1,0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movq(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src) {
  mov64(rscratch1, src);
  movq(dst, rscratch1);
}

// These are mostly for initializing NULL
void MacroAssembler::movptr(Address dst, int32_t src) {
  movslq(dst, src);
}

void MacroAssembler::movptr(Register dst, int32_t src) {
  mov64(dst, (intptr_t)src);
}

void MacroAssembler::pushoop(jobject obj) {
  movoop(rscratch1, obj);
  push(rscratch1);
}

void MacroAssembler::pushklass(Metadata* obj) {
  mov_metadata(rscratch1, obj);
  push(rscratch1);
}

void MacroAssembler::pushptr(AddressLiteral src) {
  lea(rscratch1, src);
  if (src.is_lval()) {
    push(rscratch1);
  } else {
    pushq(Address(rscratch1, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  if (clear_pc) {
    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
           last_java_fp);
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(r15_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(rscratch1, InternalAddress(last_java_pc));
    movptr(java_pc, rscratch1);
  }

  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  address rip = pc();
  pusha(); // get regs on stack
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(c_rarg1, InternalAddress(rip));
  movq(c_rarg2, rsp); // pass pointer to regs array
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
      assert(false, "start up GDB");
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
    assert(false, err_msg("DEBUG MESSAGE: %s", msg));
  }
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("rip = 0x%016lx", pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  PRINT_REG(rsp, regs[11]);
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near top of stack.
  int64_t* rsp = (int64_t*) regs[11];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

#endif // _LP64

// Now versions that are common to 32/64 bit

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::addsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    addss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::align(int modulus) {
  if (offset() % modulus != 0) {
    nop(modulus - (offset() % modulus));
  }
}
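// (Example: at code offset 13, align(16) emits 3 bytes of nop so the next
// instruction starts at offset 16; at an already-aligned offset it emits
// nothing.)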

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::andpd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::andps(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
  pushf();
  if (os::is_MP())
    lock();
  incrementl(counter_addr);
  popf();
}
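// (The pushf/popf pair preserves the caller's condition codes across the
// increment, since the inc itself rewrites ZF/SF/OF; counters are typically
// bumped in the middle of flag-dependent sequences.)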

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-os::vm_page_size())), size );
  subptr(tmp, os::vm_page_size());
  subl(size, os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 1; i <= StackShadowPages; i++) {
    // this could be any sized move but it can serve as a debugging crumb,
    // so the bigger the better.
    movptr(Address(tmp, (-i*os::vm_page_size())), size );
  }
}
|
1395 |
||
1396 |
void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::equal, done);
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}

// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

void MacroAssembler::call(AddressLiteral entry) {
  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch1, entry);
    Assembler::call(rscratch1);
  }
}

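// Note: rax is used here as the inline-cache register on x86;
// Universe::non_oop_word() is a sentinel that never matches a real
// receiver, so the first call through this site always goes through
// inline-cache resolution.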
void MacroAssembler::ic_call(address entry) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc());
  movptr(rax, (intptr_t)Universe::non_oop_word());
  call(AddressLiteral(entry, rh));
}

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   int number_of_arguments,
                                   bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   bool check_exceptions) {
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   bool check_exceptions) {

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::super_call_VM(Register oop_result,
                                   Register last_java_sp,
                                   address entry_point,
                                   Register arg_1,
                                   Register arg_2,
                                   Register arg_3,
                                   bool check_exceptions) {
  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}

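// Shared tail of all call_VM flavors: establishes the last Java frame,
// passes the current thread as the implicit first C argument, makes the
// call, then checks for pending exceptions and fetches any oop result.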
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // r12 is the heapbase.
  LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT

  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however, the register value can be used directly if it is callee saved.
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true, false);

#ifndef CC_INTERP
  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);
#endif /* CC_INTERP */

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception; however, if
    // we relocate, that branch may not reach. So we jump around it so we
    // can always reach the target.
    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculating the value for last_Java_sp is somewhat subtle. call_VM
  // does an intermediate call which places a return address on the stack
  // just under the stack pointer as the caller left it. This allows
  // us to retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM can only use register args,
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);

}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);
  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
  verify_oop(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
}

void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}

void MacroAssembler::check_and_handle_popframe(Register java_thread) {
}

void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
  if (reachable(src1)) {
    cmpl(as_Address(src1), imm);
  } else {
    lea(rscratch1, src1);
    cmpl(Address(rscratch1, 0), imm);
  }
}

void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "use cmpptr");
  if (reachable(src2)) {
    cmpl(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    cmpl(src1, Address(rscratch1, 0));
  }
}

void MacroAssembler::cmp32(Register src1, int32_t imm) {
  Assembler::cmpl(src1, imm);
}

void MacroAssembler::cmp32(Register src1, Address src2) {
  Assembler::cmpl(src1, src2);
}

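// ucomisd/ucomiss set ZF:PF:CF to 111 for unordered, 000 for greater,
// 001 for less and 100 for equal; the two helpers below map this onto
// the -1/0/+1 result convention of the Java fcmp/dcmp bytecodes.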
void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomisd(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}

void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomiss(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}


void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
  if (reachable(src1)) {
    cmpb(as_Address(src1), imm);
  } else {
    lea(rscratch1, src1);
    cmpb(Address(rscratch1, 0), imm);
  }
}

void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
#ifdef _LP64
  if (src2.is_lval()) {
    movptr(rscratch1, src2);
    Assembler::cmpq(src1, rscratch1);
  } else if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
#else
  if (src2.is_lval()) {
    cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
  } else {
    cmpl(src1, as_Address(src2));
  }
#endif // _LP64
}

void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
  assert(src2.is_lval(), "not a mem-mem compare");
#ifdef _LP64
  // moves src2's literal address
  movptr(rscratch1, src2);
  Assembler::cmpq(src1, rscratch1);
#else
  cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
#endif // _LP64
}

void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
  if (reachable(adr)) {
    if (os::is_MP())
      lock();
    cmpxchgptr(reg, as_Address(adr));
  } else {
    lea(rscratch1, adr);
    if (os::is_MP())
      lock();
    cmpxchgptr(reg, Address(rscratch1, 0));
  }
}

void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
  LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
}

void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::comisd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::comisd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::comiss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::comiss(dst, Address(rscratch1, 0));
  }
}


void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  jcc(negated_cond, L);
  atomic_incl(counter_addr);
  bind(L);
}

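// Note: the explicit special case is needed because x86 idiv raises #DE
// when the quotient overflows (min_jint / -1 == 2^31 does not fit in
// 32 bits), while the JVM spec requires the result to be min_jint.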
int MacroAssembler::corrected_idivl(Register reg) {
  // Full implementation of Java idiv and irem; checks for
  // special case as described in JVM spec., p.243 & p.271.
  // The function returns the (pc) offset of the idivl
  // instruction - may be needed for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_int
  //         reg: divisor (may not be rax/rdx)     -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_int
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdql();
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}


void MacroAssembler::decrementl(Register reg, int value) {
  if (value == min_jint) {subl(reg, value) ; return; }
  if (value <  0) { incrementl(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decl(reg) ; return; }
  /* else */      { subl(reg, value)       ; return; }
}

void MacroAssembler::decrementl(Address dst, int value) {
  if (value == min_jint) {subl(dst, value) ; return; }
  if (value <  0) { incrementl(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decl(dst) ; return; }
  /* else */      { subl(dst, value)       ; return; }
}

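// Signed division by 2^shift_value, rounding toward zero as Java
// requires: for negative inputs the bias (2^shift - 1) is added first,
// e.g. -7/2: (-7 + 1) >> 1 == -3, whereas a plain sar would yield -4.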
void MacroAssembler::division_with_shift (Register reg, int shift_value) {
  assert (shift_value > 0, "illegal shift value");
  Label _is_positive;
  testl (reg, reg);
  jcc (Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1 ;

  if (offset == 1) {
    incrementl(reg);
  } else {
    addl(reg, offset);
  }

  bind (_is_positive);
  sarl(reg, shift_value);
}

void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::divsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::divsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::divss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::divss(dst, Address(rscratch1, 0));
  }
}

// !defined(COMPILER2) is because of stupid core builds
#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
void MacroAssembler::empty_FPU_stack() {
  if (VM_Version::supports_mmx()) {
    emms();
  } else {
    for (int i = 8; i-- > 0; ) ffree(i);
  }
}
#endif // !LP64 || C1 || !C2


// Defines obj, preserves var_size_in_bytes
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert(obj == rax, "obj must be in rax for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    jmp(slow_case);
  } else {
    Register end = t1;
    Label retry;
    bind(retry);
    ExternalAddress heap_top((address) Universe::heap()->top_addr());
    movptr(obj, heap_top);
    if (var_size_in_bytes == noreg) {
      lea(end, Address(obj, con_size_in_bytes));
    } else {
      lea(end, Address(obj, var_size_in_bytes, Address::times_1));
    }
    // if end < obj then we wrapped around => object too long => slow case
    cmpptr(end, obj);
    jcc(Assembler::below, slow_case);
    cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
    jcc(Assembler::above, slow_case);
    // Compare obj with the top addr, and if still equal, store the new top addr in
    // end at the address of the top addr pointer. Sets ZF if was equal, and clears
    // it otherwise. Use lock prefix for atomicity on MPs.
    locked_cmpxchgptr(end, heap_top);
    jcc(Assembler::notEqual, retry);
  }
}

void MacroAssembler::enter() {
  push(rbp);
  mov(rbp, rsp);
}

// A 5 byte nop that is safe for patching (see patch_verified_entry)
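// The segment-override prefixes below combine with the final nop into a
// single 5-byte instruction, so no thread can be executing in the middle
// of it when it gets overwritten.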
void MacroAssembler::fat_nop() {
  if (UseAddressNop) {
    addr_nop_5();
  } else {
    emit_int8(0x26); // es:
    emit_int8(0x2e); // cs:
    emit_int8(0x64); // fs:
    emit_int8(0x65); // gs:
    emit_int8((unsigned char)0x90);
  }
}

void MacroAssembler::fcmp(Register tmp) {
  fcmp(tmp, 1, true, true);
}

void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
  assert(!pop_right || pop_left, "usage error");
  if (VM_Version::supports_cmov()) {
    assert(tmp == noreg, "unneeded temp");
    if (pop_left) {
      fucomip(index);
    } else {
      fucomi(index);
    }
    if (pop_right) {
      fpop();
    }
  } else {
    assert(tmp != noreg, "need temp");
    if (pop_left) {
      if (pop_right) {
        fcompp();
      } else {
        fcomp(index);
      }
    } else {
      fcom(index);
    }
    // convert FPU condition into eflags condition via rax
    save_rax(tmp);
    fwait(); fnstsw_ax();
    sahf();
    restore_rax(tmp);
  }
  // condition codes set as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
}

void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
  fcmp2int(dst, unordered_is_less, 1, true, true);
}

void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
  fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}

void MacroAssembler::fld_d(AddressLiteral src) {
  fld_d(as_Address(src));
}

void MacroAssembler::fld_s(AddressLiteral src) {
  fld_s(as_Address(src));
}

void MacroAssembler::fld_x(AddressLiteral src) {
  Assembler::fld_x(as_Address(src));
}

void MacroAssembler::fldcw(AddressLiteral src) {
  Assembler::fldcw(as_Address(src));
}

void MacroAssembler::pow_exp_core_encoding() {
  // kills rax, rcx, rdx
  subptr(rsp,sizeof(jdouble));
  // computes 2^X. Stack: X ...
  // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
  // keep it on the thread's stack to compute 2^int(X) later;
  // then compute 2^(X-int(X)) as ((2^(X-int(X))-1)+1).
  // The final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X)).
  fld_s(0);                 // Stack: X X ...
  frndint();                // Stack: int(X) X ...
  fsuba(1);                 // Stack: int(X) X-int(X) ...
  fistp_s(Address(rsp,0));  // move int(X) as integer to thread's stack. Stack: X-int(X) ...
  f2xm1();                  // Stack: 2^(X-int(X))-1 ...
  fld1();                   // Stack: 1 2^(X-int(X))-1 ...
  faddp(1);                 // Stack: 2^(X-int(X))
  // computes 2^(int(X)): add exponent bias (1023) to int(X), then
  // shift int(X)+1023 to the exponent position.
  // The exponent is limited to 11 bits: if int(X)+1023 does not fit in
  // 11 bits, set the result to NaN. 0x000 and 0x7FF are reserved exponent
  // values, so detect them and set the result to NaN as well.
  movl(rax,Address(rsp,0));
  movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
  addl(rax, 1023);
  movl(rdx,rax);
  shll(rax,20);
  // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
  addl(rdx,1);
  // Check that 1 < int(X)+1023+1 < 2048
  // in 3 steps:
  // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
  // 2- (int(X)+1023+1)&-2048 != 0
  // 3- (int(X)+1023+1)&-2048 != 1
  // Do 2- first because addl just updated the flags.
  cmov32(Assembler::equal,rax,rcx);
  cmpl(rdx,1);
  cmov32(Assembler::equal,rax,rcx);
  testl(rdx,rcx);
  cmov32(Assembler::notEqual,rax,rcx);
  movl(Address(rsp,4),rax);
  movl(Address(rsp,0),0);
  fmul_d(Address(rsp,0));   // Stack: 2^X ...
  addptr(rsp,sizeof(jdouble));
}

void MacroAssembler::increase_precision() {
  subptr(rsp, BytesPerWord);
  fnstcw(Address(rsp, 0));
  movl(rax, Address(rsp, 0));
  orl(rax, 0x300);
  push(rax);
  fldcw(Address(rsp, 0));
  pop(rax);
}

void MacroAssembler::restore_precision() {
  fldcw(Address(rsp, 0));
  addptr(rsp, BytesPerWord);
}

void MacroAssembler::fast_pow() {
  // computes X^Y = 2^(Y * log2(X))
  // if fast computation is not possible, result is NaN. Requires
  // fallback from user of this macro.
  // increase precision for intermediate steps of the computation
  increase_precision();
  fyl2x();                 // Stack: (Y*log2(X)) ...
  pow_exp_core_encoding(); // Stack: exp(X) ...
  restore_precision();
}

void MacroAssembler::fast_exp() {
  // computes exp(X) = 2^(X * log2(e))
  // if fast computation is not possible, result is NaN. Requires
  // fallback from user of this macro.
  // increase precision for intermediate steps of the computation
  increase_precision();
  fldl2e();                // Stack: log2(e) X ...
  fmulp(1);                // Stack: (X*log2(e)) ...
  pow_exp_core_encoding(); // Stack: exp(X) ...
  restore_precision();
}

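// Note on the fcmp(tmp, 0, false, false) checks below: comparing ST0
// with itself reports unordered (parity set) only for NaN, which is how
// a failed fast-path computation is detected before falling back.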
void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
  // kills rax, rcx, rdx
  // pow and exp need 2 extra registers on the fpu stack.
  Label slow_case, done;
  Register tmp = noreg;
  if (!VM_Version::supports_cmov()) {
    // fcmp needs a temporary so preserve rdx
    tmp = rdx;
  }
  Register tmp2 = rax;
  Register tmp3 = rcx;

  if (is_exp) {
    // Stack: X
    fld_s(0);                   // duplicate argument for runtime call. Stack: X X
    fast_exp();                 // Stack: exp(X) X
    fcmp(tmp, 0, false, false); // Stack: exp(X) X
    // exp(X) not equal to itself: exp(X) is NaN, go to slow case.
    jcc(Assembler::parity, slow_case);
    // get rid of duplicate argument. Stack: exp(X)
    if (num_fpu_regs_in_use > 0) {
      fxch();
      fpop();
    } else {
      ffree(1);
    }
    jmp(done);
  } else {
    // Stack: X Y
    Label x_negative, y_odd;

    fldz();                     // Stack: 0 X Y
    fcmp(tmp, 1, true, false);  // Stack: X Y
    jcc(Assembler::above, x_negative);

    // X >= 0

    fld_s(1);                   // duplicate arguments for runtime call. Stack: Y X Y
    fld_s(1);                   // Stack: X Y X Y
    fast_pow();                 // Stack: X^Y X Y
    fcmp(tmp, 0, false, false); // Stack: X^Y X Y
    // X^Y not equal to itself: X^Y is NaN, go to slow case.
    jcc(Assembler::parity, slow_case);
    // get rid of duplicate arguments. Stack: X^Y
    if (num_fpu_regs_in_use > 0) {
      fxch(); fpop();
      fxch(); fpop();
    } else {
      ffree(2);
      ffree(1);
    }
    jmp(done);

    // X <= 0
    bind(x_negative);

    fld_s(1);                   // Stack: Y X Y
    frndint();                  // Stack: int(Y) X Y
    fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
    jcc(Assembler::notEqual, slow_case);

    subptr(rsp, 8);

    // For X^Y, when X < 0, Y has to be an integer and the final
    // result depends on whether it's odd or even. We just checked
    // that int(Y) == Y.  We move int(Y) to gp registers as a 64 bit
    // integer to test its parity. If int(Y) is huge and doesn't fit
    // in the 64 bit integer range, the integer indefinite value will
    // end up in the gp registers. Huge numbers are all even, and the
    // integer indefinite number is even, so it's fine.

#ifdef ASSERT
    // Let's check we don't end up with an integer indefinite number
    // when not expected. First test for huge numbers: check whether
    // int(Y)+1 == int(Y), which is true for very large numbers, and
    // those are all even. A 64 bit integer is guaranteed to not
    // overflow for numbers where y+1 != y (when precision is set to
    // double precision).
    Label y_not_huge;

    fld1();                     // Stack: 1 int(Y) X Y
    fadd(1);                    // Stack: 1+int(Y) int(Y) X Y

#ifdef _LP64
    // trip to memory to force the precision down from double extended
    // precision
    fstp_d(Address(rsp, 0));
    fld_d(Address(rsp, 0));
#endif

    fcmp(tmp, 1, true, false);  // Stack: int(Y) X Y
#endif

    // move int(Y) as 64 bit integer to thread's stack
    fistp_d(Address(rsp,0));    // Stack: X Y

#ifdef ASSERT
    jcc(Assembler::notEqual, y_not_huge);

    // Y is huge so we know it's even. It may not fit in a 64 bit
    // integer and we don't want the debug code below to see the
    // integer indefinite value, so overwrite int(Y) on the thread's
    // stack with 0.
    movl(Address(rsp, 0), 0);
    movl(Address(rsp, 4), 0);

    bind(y_not_huge);
#endif

    fld_s(1);                   // duplicate arguments for runtime call. Stack: Y X Y
    fld_s(1);                   // Stack: X Y X Y
    fabs();                     // Stack: abs(X) Y X Y
    fast_pow();                 // Stack: abs(X)^Y X Y
    fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
    // abs(X)^Y not equal to itself: abs(X)^Y is NaN, go to slow case.

    pop(tmp2);
    NOT_LP64(pop(tmp3));
    jcc(Assembler::parity, slow_case);

#ifdef ASSERT
    // Check that int(Y) is not the integer indefinite value (int
    // overflow). Shouldn't happen because for values that would
    // overflow, 1+int(Y)==Y, which was tested earlier.
#ifndef _LP64
    {
      Label integer;
      testl(tmp2, tmp2);
      jcc(Assembler::notZero, integer);
      cmpl(tmp3, 0x80000000);
      jcc(Assembler::notZero, integer);
      STOP("integer indefinite value shouldn't be seen here");
      bind(integer);
    }
#else
    {
      Label integer;
      mov(tmp3, tmp2); // preserve tmp2 for parity check below
      shlq(tmp3, 1);
      jcc(Assembler::carryClear, integer);
      jcc(Assembler::notZero, integer);
      STOP("integer indefinite value shouldn't be seen here");
      bind(integer);
    }
#endif
#endif

    // get rid of duplicate arguments. Stack: X^Y
    if (num_fpu_regs_in_use > 0) {
      fxch(); fpop();
      fxch(); fpop();
    } else {
      ffree(2);
      ffree(1);
    }

    testl(tmp2, 1);
    jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
    // X <= 0, Y odd: X^Y = -abs(X)^Y

    fchs();                     // Stack: -abs(X)^Y Y
    jmp(done);
  }

  // slow case: runtime call
  bind(slow_case);

  fpop();                      // pop incorrect result or int(Y)

  fp_runtime_fallback(is_exp ? CAST_FROM_FN_PTR(address, SharedRuntime::dexp) : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
                      is_exp ? 1 : 2, num_fpu_regs_in_use);

  // Come here with result in F-TOS
  bind(done);
}

void MacroAssembler::fpop() {
  ffree();
  fincstp();
}

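// fprem reduces the operand only partially per iteration and reports an
// incomplete reduction via the C2 condition bit (bit 10, 0x400, of the
// FPU status word), hence the retry loop below until C2 clears.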
void MacroAssembler::fremr(Register tmp) {
  save_rax(tmp);
  { Label L;
    bind(L);
    fprem();
    fwait(); fnstsw_ax();
#ifdef _LP64
    testl(rax, 0x400);
    jcc(Assembler::notEqual, L);
#else
    sahf();
    jcc(Assembler::parity, L);
#endif // _LP64
  }
  restore_rax(tmp);
  // Result is in ST0.
  // Note: fxch & fpop to get rid of ST1
  // (otherwise FPU stack could overflow eventually)
  fxch(1);
  fpop();
}

void MacroAssembler::incrementl(AddressLiteral dst) {
  if (reachable(dst)) {
    incrementl(as_Address(dst));
  } else {
    lea(rscratch1, dst);
    incrementl(Address(rscratch1, 0));
  }
}

void MacroAssembler::incrementl(ArrayAddress dst) {
  incrementl(as_Address(dst));
}

void MacroAssembler::incrementl(Register reg, int value) {
  if (value == min_jint) {addl(reg, value) ; return; }
  if (value <  0) { decrementl(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incl(reg) ; return; }
  /* else */      { addl(reg, value)       ; return; }
}

void MacroAssembler::incrementl(Address dst, int value) {
  if (value == min_jint) {addl(dst, value) ; return; }
  if (value <  0) { decrementl(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incl(dst) ; return; }
  /* else */      { addl(dst, value)       ; return; }
}

void MacroAssembler::jump(AddressLiteral dst) {
  if (reachable(dst)) {
    jmp_literal(dst.target(), dst.rspec());
  } else {
    lea(rscratch1, dst);
    jmp(rscratch1);
  }
}

void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
  if (reachable(dst)) {
    InstructionMark im(this);
    relocate(dst.reloc());
    const int short_size = 2;
    const int long_size = 6;
    int offs = (intptr_t)dst.target() - ((intptr_t)pc());
    if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int8(0x70 | cc);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      emit_int8(0x0F);
      emit_int8((unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
#ifdef ASSERT
    warning("reversing conditional branch");
#endif /* ASSERT */
    Label skip;
    jccb(reverse[cc], skip);
    lea(rscratch1, dst);
    Assembler::jmp(rscratch1);
    bind(skip);
  }
}

void MacroAssembler::ldmxcsr(AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ldmxcsr(as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ldmxcsr(Address(rscratch1, 0));
  }
}

int MacroAssembler::load_signed_byte(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    off = offset();
    movsbl(dst, src); // movsxb
  } else {
    off = load_unsigned_byte(dst, src);
    shll(dst, 24);
    sarl(dst, 24);
  }
  return off;
}

// Note: load_signed_short used to be called load_signed_word.
// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
// The term "word" in HotSpot means a 32- or 64-bit machine word.
int MacroAssembler::load_signed_short(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    // This is dubious to me since it seems safe to do a signed 16 => 64 bit
    // version but this is what 64bit has always done. This seems to imply
    // that users are only using 32bits worth.
    off = offset();
    movswl(dst, src); // movsxw
  } else {
    off = load_unsigned_short(dst, src);
    shll(dst, 16);
    sarl(dst, 16);
  }
  return off;
}

int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22.
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzbl(dst, src); // movzxb
  } else {
    xorl(dst, dst);
    off = offset();
    movb(dst, src);
  }
  return off;
}

// Note: load_unsigned_short used to be called load_unsigned_word.
int MacroAssembler::load_unsigned_short(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22.
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzwl(dst, src); // movzxw
  } else {
    xorl(dst, dst);
    off = offset();
    movw(dst, src);
  }
  return off;
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
  switch (size_in_bytes) {
#ifndef _LP64
  case  8:
    assert(dst2 != noreg, "second dest register required");
    movl(dst,  src);
    movl(dst2, src.plus_disp(BytesPerInt));
    break;
#else
  case  8:  movq(dst, src); break;
#endif
  case  4:  movl(dst, src); break;
  case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
  case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
  default:  ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
  switch (size_in_bytes) {
#ifndef _LP64
  case  8:
    assert(src2 != noreg, "second source register required");
    movl(dst,                        src);
    movl(dst.plus_disp(BytesPerInt), src2);
    break;
#else
  case  8:  movq(dst, src); break;
#endif
  case  4:  movl(dst, src); break;
  case  2:  movw(dst, src); break;
  case  1:  movb(dst, src); break;
  default:  ShouldNotReachHere();
  }
}

void MacroAssembler::mov32(AddressLiteral dst, Register src) {
  if (reachable(dst)) {
    movl(as_Address(dst), src);
  } else {
    lea(rscratch1, dst);
    movl(Address(rscratch1, 0), src);
  }
}

void MacroAssembler::mov32(Register dst, AddressLiteral src) {
  if (reachable(src)) {
    movl(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movl(dst, Address(rscratch1, 0));
  }
}

// C++ bool manipulation

void MacroAssembler::movbool(Register dst, Address src) {
  if(sizeof(bool) == 1)
    movb(dst, src);
  else if(sizeof(bool) == 2)
    movw(dst, src);
  else if(sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbool(Address dst, bool boolconst) {
  if(sizeof(bool) == 1)
    movb(dst, (int) boolconst);
  else if(sizeof(bool) == 2)
    movw(dst, (int) boolconst);
  else if(sizeof(bool) == 4)
    movl(dst, (int) boolconst);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbool(Address dst, Register src) {
  if(sizeof(bool) == 1)
    movb(dst, src);
  else if(sizeof(bool) == 2)
    movw(dst, src);
  else if(sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbyte(ArrayAddress dst, int src) {
  movb(as_Address(dst), src);
}

void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    movdl(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movdl(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    movq(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movq(dst, Address(rscratch1, 0));
  }
}

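// movsd from memory zeroes the upper half of the destination XMM
// register, breaking any false dependence on its old contents, while
// movlpd merges into it; UseXmmLoadAndClearUpper selects whichever is
// preferable on the current CPU.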
void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) { |
|
2751 |
if (reachable(src)) { |
|
2752 |
if (UseXmmLoadAndClearUpper) { |
|
2753 |
movsd (dst, as_Address(src)); |
|
2754 |
} else { |
|
2755 |
movlpd(dst, as_Address(src)); |
|
2756 |
} |
|
2757 |
} else { |
|
2758 |
lea(rscratch1, src); |
|
2759 |
if (UseXmmLoadAndClearUpper) { |
|
2760 |
movsd (dst, Address(rscratch1, 0)); |
|
2761 |
} else { |
|
2762 |
movlpd(dst, Address(rscratch1, 0)); |
|
2763 |
} |
|
2764 |
} |
|
2765 |
} |
|
2766 |
||
2767 |
void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) { |
|
2768 |
if (reachable(src)) { |
|
2769 |
movss(dst, as_Address(src)); |
|
2770 |
} else { |
|
2771 |
lea(rscratch1, src); |
|
2772 |
movss(dst, Address(rscratch1, 0)); |
|
2773 |
} |
|
2774 |
} |
|
2775 |
||
2776 |
void MacroAssembler::movptr(Register dst, Register src) { |
|
2777 |
LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); |
|
2778 |
} |
|
2779 |
||
2780 |
void MacroAssembler::movptr(Register dst, Address src) { |
|
2781 |
LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); |
|
2782 |
} |
|
2783 |
||
2784 |
// src should NEVER be a real pointer. Use AddressLiteral for true pointers |
|
2785 |
void MacroAssembler::movptr(Register dst, intptr_t src) { |
|
2786 |
LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src)); |
|
2787 |
} |
|
2788 |
||
2789 |
void MacroAssembler::movptr(Address dst, Register src) { |
|
2790 |
LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); |
|
2791 |
} |
|
2792 |
||
2793 |
void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movdqu(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::movdqu(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movdqa(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::movdqa(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::movsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::movss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::mulsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::mulsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::mulss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::mulss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any (non-CC) registers
    // NOTE: cmpl is plenty here to provoke a segv
    cmpptr(rax, Address(reg, 0));
    // Note: should probably use testl(rax, Address(reg, 0));
    //       may be shorter code (however, this version of
    //       testl needs to be implemented first)
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

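// Background for null_check (an explanatory sketch, not generated code):
// HotSpot prefers *implicit* null checks -- it lets the faulting access
// happen and converts the resulting SIGSEGV into a NullPointerException.
// That only works while reg + offset still lands inside the protected
// page(s) at address 0. For a small offset, say 8 bytes:
//
//   M[reg + 8]   // reg == NULL faults at address 8, still inside the
//                // guard page, so no explicit check is needed
//
// For a large offset the fault address could be valid memory, so
// needs_explicit_null_check() returns true and the cmpptr above touches
// M[reg] first. The exact cutoff is platform-dependent; treat the numbers
// here as illustrative and see needs_explicit_null_check for the rule.
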
void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
  // (e.g., MSVC can't call ps() otherwise)
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}

void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}

void MacroAssembler::pop_FPU_state() {
  NOT_LP64(frstor(Address(rsp, 0));)
  LP64_ONLY(fxrstor(Address(rsp, 0));)
  addptr(rsp, FPUStateSizeInWords * wordSize);
}

void MacroAssembler::pop_IU_state() {
  popa();
  LP64_ONLY(addq(rsp, 8));
  popf();
}

// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}

void MacroAssembler::push_FPU_state() {
  subptr(rsp, FPUStateSizeInWords * wordSize);
#ifndef _LP64
  fnsave(Address(rsp, 0));
  fwait();
#else
  fxsave(Address(rsp, 0));
#endif // _LP64
}

void MacroAssembler::push_IU_state() {
  // Push flags first because pusha kills them
  pushf();
  // Make sure rsp stays 16-byte aligned
  LP64_ONLY(subq(rsp, 8));
  pusha();
}

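// A note on the alignment bookkeeping above (explanatory, with byte counts
// that assume a 64-bit build entered with rsp 16-byte aligned, per the
// Warning comment):
//
//   pushf();          // -8   -> rsp % 16 == 8
//   subq(rsp, 8);     // -8   -> rsp % 16 == 0
//   pusha();          // -128 -> 16 GPRs * 8 bytes, alignment preserved
//
// The alignment matters because fxsave/fxrstor require their 512-byte save
// area to be 16-byte aligned and fault otherwise.
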
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // we must set sp to zero to clear frame
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  if (clear_fp) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  if (clear_pc)
    movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
}

void MacroAssembler::restore_rax(Register tmp) {
  if (tmp == noreg) pop(rax);
  else if (tmp != rax) mov(rax, tmp);
}

void MacroAssembler::round_to(Register reg, int modulus) {
  addptr(reg, modulus - 1);
  andptr(reg, -modulus);
}

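// round_to rounds reg up to the next multiple of modulus (modulus must be
// a power of two for the mask trick to work). A worked example with
// modulus == 8 and reg == 13:
//
//   13 + 7   == 20    // addptr(reg, modulus - 1)
//   20 & ~7  == 16    // andptr(reg, -modulus); -8 is ...11111000 binary
//
// Values that are already aligned are unchanged: (16 + 7) & ~7 == 16.
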
void MacroAssembler::save_rax(Register tmp) {
  if (tmp == noreg) push(rax);
  else if (tmp != rax) mov(tmp, rax);
}

// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  movl(tmp, thread);
  shrl(tmp, os::get_serialize_page_shift_count());
  andl(tmp, (os::vm_page_size() - sizeof(int)));

  Address index(noreg, tmp, Address::times_1);
  ExternalAddress page(os::get_memory_serialize_page());

  // Size of store must match masking code above
  movl(as_Address(ArrayAddress(page, index)), tmp);
}

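// How the serialization page works (an explanatory sketch): rather than
// emitting a fence in every thread-state transition, each thread does a
// plain store into a shared page at a thread-specific slot:
//
//   offset = (thread_ptr >> serialize_page_shift) & (page_size - sizeof(int));
//   *(int*)(serialize_page_base + offset) = value;   // the movl above
//
// When the VM thread needs a global memory barrier it write-protects the
// page; any thread still mid-store traps, and handling that trap
// serializes it with the VM thread. Hashing threads to different slots
// keeps the stores on different cache lines, per the comment above.
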
// Calls to C land
//
// When entering C land, the rbp and rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register java_thread,
                                         Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional

  if (last_java_fp->is_valid()) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
  }

  // last_java_pc is optional

  if (last_java_pc != NULL) {
    lea(Address(java_thread,
                JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
        InternalAddress(last_java_pc));
  }
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}

void MacroAssembler::shlptr(Register dst, int imm8) {
  LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
}

void MacroAssembler::shrptr(Register dst, int imm8) {
  LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
}

void MacroAssembler::sign_extend_byte(Register reg) {
  if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
    movsbl(reg, reg); // movsxb
  } else {
    shll(reg, 24);
    sarl(reg, 24);
  }
}

void MacroAssembler::sign_extend_short(Register reg) {
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    movswl(reg, reg); // movsxw
  } else {
    shll(reg, 16);
    sarl(reg, 16);
  }
}

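// The shll/sarl fallback above is the classic shift-based sign extension
// for CPUs without a usable movsx form. For a byte, with reg = 0x000000FF
// (the signed byte -1):
//
//   shll(reg, 24)  ->  0xFF000000   // move the sign bit up to bit 31
//   sarl(reg, 24)  ->  0xFFFFFFFF   // arithmetic shift drags it back down
//
// yielding -1 as a 32-bit int, exactly what movsbl would produce.
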
void MacroAssembler::testl(Register dst, AddressLiteral src) {
  assert(reachable(src), "Address should be reachable");
  testl(dst, as_Address(src));
}

void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::sqrtsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::sqrtsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::sqrtss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::sqrtss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::subsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::subsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::subss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::subss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ucomisd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ucomisd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ucomiss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ucomiss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorpd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::xorpd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorps(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::xorps(dst, Address(rscratch1, 0));
  }
}

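// Context for the 16-byte alignment assert above (explanatory note):
// xorps/xorpd are typically used here to flip or clear floating-point sign
// bits against a 128-bit constant mask, along the lines of:
//
//   // hypothetical mask name; the real constants live elsewhere in the VM
//   static const uint64_t double_sign_flip[2] =
//     { 0x8000000000000000ULL, 0x8000000000000000ULL };
//   ...
//   xorpd(dst, ExternalAddress((address) double_sign_flip));  // dst = -dst
//
// Legacy SSE encodings fault on unaligned 128-bit memory operands, hence
// the assert unless the VEX/AVX forms (UseAVX > 0) are in use.
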
void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
  // Used in sign-bit flipping with aligned address.
  bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
  assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::pshufb(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::pshufb(dst, Address(rscratch1, 0));
  }
}

// AVX 3-operand instructions

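// These wrappers take a separate destination and two sources because the
// VEX encoding is non-destructive. Sketch of the difference (illustrative
// operands only):
//
//   addsd  xmm0, [mem]         // SSE:  xmm0 = xmm0 + [mem], dst clobbered
//   vaddsd xmm1, xmm0, [mem]   // AVX:  xmm1 = xmm0 + [mem], xmm0 preserved
//
// The nds ("non-destructive source") register below is the first source
// operand of the VEX form.
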
void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vaddsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vaddsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vaddss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vaddss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
  if (reachable(src)) {
    vandpd(dst, nds, as_Address(src), vector256);
  } else {
    lea(rscratch1, src);
    vandpd(dst, nds, Address(rscratch1, 0), vector256);
  }
}

void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
  if (reachable(src)) {
    vandps(dst, nds, as_Address(src), vector256);
  } else {
    lea(rscratch1, src);
    vandps(dst, nds, Address(rscratch1, 0), vector256);
  }
}

void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vdivsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vdivsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vdivss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vdivss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vmulsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vmulsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vmulss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vmulss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vsubsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vsubsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vsubss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vsubss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
  if (reachable(src)) {
    vxorpd(dst, nds, as_Address(src), vector256);
  } else {
    lea(rscratch1, src);
    vxorpd(dst, nds, Address(rscratch1, 0), vector256);
  }
}

void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
  if (reachable(src)) {
    vxorps(dst, nds, as_Address(src), vector256);
  } else {
    lea(rscratch1, src);
    vxorps(dst, nds, Address(rscratch1, 0), vector256);
  }
}

//////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void MacroAssembler::g1_write_barrier_pre(Register obj,
                                          Register pre_val,
                                          Register thread,
                                          Register tmp,
                                          bool tosca_live,
                                          bool expand_call) {

  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Label done;
  Label runtime;

  assert(pre_val != noreg, "check this code");

  if (obj != noreg) {
    assert_different_registers(obj, pre_val, tmp);
    assert(pre_val != rax, "check this code");
  }

  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_active()));
  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                 PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));


  // Is marking active?
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    cmpl(in_progress, 0);
  } else {
    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    cmpb(in_progress, 0);
  }
  jcc(Assembler::equal, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    load_heap_oop(pre_val, Address(obj, 0));
  }

  // Is the previous value null?
  cmpptr(pre_val, (int32_t) NULL_WORD);
  jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  movptr(tmp, index);                   // tmp := *index_adr
  cmpptr(tmp, 0);                       // tmp == 0?
  jcc(Assembler::equal, runtime);       // If yes, goto runtime

  subptr(tmp, wordSize);                // tmp := tmp - wordSize
  movptr(index, tmp);                   // *index_adr := tmp
  addptr(tmp, buffer);                  // tmp := tmp + *buffer_adr

  // Record the previous value
  movptr(Address(tmp, 0), pre_val);
  jmp(done);

  bind(runtime);
  // save the live input values
  if (tosca_live) push(rax);

  if (obj != noreg && obj != rax)
    push(obj);

  if (pre_val != rax)
    push(pre_val);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  NOT_LP64( push(thread); )

  if (expand_call) {
    LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
    pass_arg1(this, thread);
    pass_arg0(this, pre_val);
    MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
  } else {
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
  }

  NOT_LP64( pop(thread); )

  // restore the live input values
  if (pre_val != rax)
    pop(pre_val);

  if (obj != noreg && obj != rax)
    pop(obj);

  if (tosca_live) pop(rax);

  bind(done);
}

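// What the pre-barrier is doing (explanatory sketch, not generated code):
// G1's concurrent marking is snapshot-at-the-beginning (SATB), so before a
// reference field is overwritten while marking is active, the *old* value
// must be logged so the marker still visits it. In pseudo-C:
//
//   if (marking_active) {
//     oop old = *field;                         // pre_val
//     if (old != NULL) {
//       if (satb_queue.index == 0) {            // thread-local buffer full
//         runtime_flush(old, thread);           // SharedRuntime::g1_wb_pre
//       } else {
//         satb_queue.index -= sizeof(void*);    // index counts down in bytes
//         *(oop*)(satb_queue.buf + satb_queue.index) = old;
//       }
//     }
//   }
//
// (runtime_flush is shorthand; the byte-counting index is why the assembly
// subtracts wordSize before storing.)
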
void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                           Register new_val,
                                           Register thread,
                                           Register tmp,
                                           Register tmp2) {
#ifdef _LP64
  assert(thread == r15_thread, "must be");
#endif // _LP64

  Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                       PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));

  BarrierSet* bs = Universe::heap()->barrier_set();
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  Label done;
  Label runtime;

  // Does store cross heap regions?

  movptr(tmp, store_addr);
  xorptr(tmp, new_val);
  shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
  jcc(Assembler::equal, done);

  // crosses regions, storing NULL?

  cmpptr(new_val, (int32_t) NULL_WORD);
  jcc(Assembler::equal, done);

  // storing region crossing non-NULL, is card already dirty?

  ExternalAddress cardtable((address) ct->byte_map_base);
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
#ifdef _LP64
  const Register card_addr = tmp;

  movq(card_addr, store_addr);
  shrq(card_addr, CardTableModRefBS::card_shift);

  lea(tmp2, cardtable);

  // get the address of the card
  addq(card_addr, tmp2);
#else
  const Register card_index = tmp;

  movl(card_index, store_addr);
  shrl(card_index, CardTableModRefBS::card_shift);

  Address index(noreg, card_index, Address::times_1);
  const Register card_addr = tmp;
  lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
#endif
  cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
  jcc(Assembler::equal, done);

  membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
  cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
  jcc(Assembler::equal, done);


  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());

  cmpl(queue_index, 0);
  jcc(Assembler::equal, runtime);
  subl(queue_index, wordSize);
  movptr(tmp2, buffer);
#ifdef _LP64
  movslq(rscratch1, queue_index);
  addq(tmp2, rscratch1);
  movq(Address(tmp2, 0), card_addr);
#else
  addl(tmp2, queue_index);
  movl(Address(tmp2, 0), card_index);
#endif
  jmp(done);

  bind(runtime);
  // save the live input values
  push(store_addr);
  push(new_val);
#ifdef _LP64
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
#else
  push(thread);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
  pop(thread);
#endif
  pop(new_val);
  pop(store_addr);

  bind(done);
}

#endif // INCLUDE_ALL_GCS

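// Post-barrier in a nutshell (explanatory sketch): G1 only needs to track
// inter-region pointers, so the barrier filters aggressively before
// dirtying a card. Roughly:
//
//   if ((((uintptr_t)field ^ (uintptr_t)new_val) >> LogOfHRGrainBytes) == 0)
//     return;                       // same region, nothing to remember
//   if (new_val == NULL) return;    // no inter-region pointer created
//   jbyte* card = byte_map_base + ((uintptr_t)field >> card_shift);
//   if (*card == g1_young_card_val()) return;  // stores into young regions
//                                              // never need refinement
//   StoreLoad_fence();              // order the oop store vs. the card read
//   if (*card == dirty_card_val()) return;     // already logged
//   *card = dirty_card_val();       // dirty it and enqueue for refinement
//
// The StoreLoad fence mirrors the fix for 8014555 referenced in this
// file's history.
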
//////////////////////////////////////////////////////////////////////////////////

void MacroAssembler::store_check(Register obj) {
  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  store_check_part_1(obj);
  store_check_part_2(obj);
}

void MacroAssembler::store_check(Register obj, Address dst) {
  store_check(obj);
}


// split the store check operation so that other instructions can be scheduled in between
void MacroAssembler::store_check_part_1(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  shrptr(obj, CardTableModRefBS::card_shift);
}

void MacroAssembler::store_check_part_2(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  // The calculation for byte_map_base is as follows:
  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  // So this essentially converts an address to a displacement and
  // it will never need to be relocated. On 64bit however the value may be too
  // large for a 32bit displacement.

  intptr_t disp = (intptr_t) ct->byte_map_base;
  if (is_simm32(disp)) {
    Address cardtable(noreg, obj, Address::times_1, disp);
    movb(cardtable, 0);
  } else {
    // By doing it as an ExternalAddress, disp could be converted to a
    // rip-relative displacement and done in a single instruction given
    // favorable mapping and a smarter version of as_Address. Worst case it is
    // two instructions, which is no worse than loading disp into a register
    // and using a simple Address() as above.
    // We can't use ExternalAddress unconditionally since if disp == 0 we'll
    // assert, as NULL isn't acceptable in a relocation (see 6644928). In any
    // case, in some configurations we'll get a single instruction version.

    ExternalAddress cardtable((address)disp);
    Address index(noreg, obj, Address::times_1);
    movb(as_Address(ArrayAddress(cardtable, index)), 0);
  }
}

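// Card address arithmetic (a worked example with illustrative numbers):
// with card_shift == 9 each card covers 512 bytes of heap, and
//
//   card_index = heap_addr >> 9;
//   card_byte  = byte_map_base + card_index;
//
// Because byte_map_base is pre-biased by (low_bound >> 9), heap_addr can be
// used directly without first subtracting the heap base. store_check_part_1
// computes the shift; store_check_part_2 adds byte_map_base as either an
// immediate displacement (when it fits in 32 bits) or via ArrayAddress.
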
void MacroAssembler::subptr(Register dst, int32_t imm32) {
  LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
  LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
}

void MacroAssembler::subptr(Register dst, Register src) {
  LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
}

// C++ bool manipulation
void MacroAssembler::testbool(Register dst) {
  if (sizeof(bool) == 1) {
    testb(dst, 0xff);
  } else if (sizeof(bool) == 2) {
    // testw implementation needed for two byte bools
    ShouldNotReachHere();
  } else if (sizeof(bool) == 4) {
    testl(dst, dst);
  } else {
    // unsupported
    ShouldNotReachHere();
  }
}

void MacroAssembler::testptr(Register dst, Register src) {
  LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);

  verify_tlab();

  NOT_LP64(get_thread(thread));

  movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    subptr(var_size_in_bytes, obj);
  }
  verify_tlab();
}

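// TLAB allocation is plain bump-pointer allocation in thread-local memory,
// so no atomics are needed. The generated code corresponds to roughly:
//
//   HeapWord* obj = thread->tlab_top;
//   HeapWord* end = obj + size;
//   if (end > thread->tlab_end) goto slow_case;  // unsigned compare above
//   thread->tlab_top = end;                      // obj is the new object
//
// (Field names here are shorthand for the tlab_top/tlab_end offsets used
// above.) The slow path refills the TLAB or allocates directly in eden.
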
// Preserves rbx and rdx.
Register MacroAssembler::tlab_refill(Label& retry,
                                     Label& try_eden,
                                     Label& slow_case) {
  Register top = rax;
  Register t1  = rcx;
  Register t2  = rsi;
  Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
  Label do_refill, discard_tlab;

  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    jmp(slow_case);
  }

  NOT_LP64(get_thread(thread_reg));

  movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
  movptr(t1,  Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));

  // calculate amount of free space
  subptr(t1, top);
  shrptr(t1, LogHeapWordSize);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  jcc(Assembler::lessEqual, discard_tlab);

  // Retain
  // %%% yuck as movptr...
  movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
  addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
  if (TLABStats) {
    // increment number of slow_allocations
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
  }
  jmp(try_eden);

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
    // accumulate wastage -- t1 is amount free in tlab
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  testptr(top, top);
  jcc(Assembler::zero, do_refill);

  // set up the mark word
  movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
  // set the length to the remaining space
  subptr(t1, typeArrayOopDesc::header_size(T_INT));
  addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
  shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
  movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
  // set klass to intArrayKlass
  // dubious reloc why not an oop reloc?
  movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
  // store klass last.  concurrent gcs assume klass length is valid if
  // klass field is not null.
  store_klass(top, t1);

  movptr(t1, top);
  subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
  incr_allocated_bytes(thread_reg, t1, 0);

  // refill the tlab with an eden allocation
  bind(do_refill);
  movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
  shlptr(t1, LogHeapWordSize);
  // allocate new tlab, address returned in top
  eden_allocate(top, t1, 0, t2, slow_case);

  // Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
  if (UseTLAB) {
    Label ok;
    Register tsize = rsi;
    assert_different_registers(tsize, thread_reg, t1);
    push(tsize);
    movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
    shlptr(tsize, LogHeapWordSize);
    cmpptr(t1, tsize);
    jcc(Assembler::equal, ok);
    STOP("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
    pop(tsize);
  }
#endif
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
  addptr(top, t1);
  subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
  verify_tlab();
  jmp(retry);

  return thread_reg; // for use by caller
}

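// The refill policy above in brief (explanatory note): t1 holds the words
// still free in the TLAB. If that exceeds refill_waste_limit, the TLAB is
// kept (too much space to throw away) and the object goes straight to
// shared eden; otherwise the TLAB is discarded and refilled. A discarded
// TLAB is not just abandoned -- the dead space is formatted as a plausible
// int[] (mark word, length, klass) so heap walkers can step over it like
// any other object. The length arithmetic before the store is:
//
//   int_len = (free_words - header_words + alignment_reserve_words)
//             * (HeapWordSize / sizeof(jint));   // words -> jint elements
//
// which is the subptr/addptr/shlptr sequence above.
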
void MacroAssembler::incr_allocated_bytes(Register thread,
                                          Register var_size_in_bytes,
                                          int con_size_in_bytes,
                                          Register t1) {
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    get_thread(thread);
#endif
  }

#ifdef _LP64
  if (var_size_in_bytes->is_valid()) {
    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
#else
  if (var_size_in_bytes->is_valid()) {
    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
  adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
#endif
}

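// On 32-bit the allocated-bytes counter is a 64-bit field updated with an
// add/add-with-carry pair, since there is no single 64-bit add. E.g. for a
// low word of 0xFFFFFFF8 and an 8-byte allocation:
//
//   addl(lo, 8)   // lo -> 0x00000000, carry flag set
//   adcl(hi, 0)   // hi absorbs the carry
//
// which is why the #else branch above ends with adcl(..., 0).
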
void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
  pusha();

  // if we are coming from c1, xmm registers may be live
  int off = 0;
  if (UseSSE == 1)  {
    subptr(rsp, sizeof(jdouble)*8);
    movflt(Address(rsp,off++*sizeof(jdouble)), xmm0);
    movflt(Address(rsp,off++*sizeof(jdouble)), xmm1);
    movflt(Address(rsp,off++*sizeof(jdouble)), xmm2);
    movflt(Address(rsp,off++*sizeof(jdouble)), xmm3);
    movflt(Address(rsp,off++*sizeof(jdouble)), xmm4);
    movflt(Address(rsp,off++*sizeof(jdouble)), xmm5);
    movflt(Address(rsp,off++*sizeof(jdouble)), xmm6);
    movflt(Address(rsp,off++*sizeof(jdouble)), xmm7);
  } else if (UseSSE >= 2)  {
#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      assert(UseAVX > 0, "256bit vectors are supported only with AVX");
      // Save upper half of YMM registers
      subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
      vextractf128h(Address(rsp,  0), xmm0);
      vextractf128h(Address(rsp, 16), xmm1);
      vextractf128h(Address(rsp, 32), xmm2);
      vextractf128h(Address(rsp, 48), xmm3);
      vextractf128h(Address(rsp, 64), xmm4);
      vextractf128h(Address(rsp, 80), xmm5);
      vextractf128h(Address(rsp, 96), xmm6);
      vextractf128h(Address(rsp,112), xmm7);
#ifdef _LP64
      vextractf128h(Address(rsp,128), xmm8);
      vextractf128h(Address(rsp,144), xmm9);
      vextractf128h(Address(rsp,160), xmm10);
      vextractf128h(Address(rsp,176), xmm11);
      vextractf128h(Address(rsp,192), xmm12);
      vextractf128h(Address(rsp,208), xmm13);
      vextractf128h(Address(rsp,224), xmm14);
      vextractf128h(Address(rsp,240), xmm15);
#endif
    }
#endif
    // Save whole 128bit (16 bytes) XMM registers
    subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
    movdqu(Address(rsp,off++*16), xmm0);
    movdqu(Address(rsp,off++*16), xmm1);
    movdqu(Address(rsp,off++*16), xmm2);
    movdqu(Address(rsp,off++*16), xmm3);
    movdqu(Address(rsp,off++*16), xmm4);
    movdqu(Address(rsp,off++*16), xmm5);
    movdqu(Address(rsp,off++*16), xmm6);
    movdqu(Address(rsp,off++*16), xmm7);
#ifdef _LP64
    movdqu(Address(rsp,off++*16), xmm8);
    movdqu(Address(rsp,off++*16), xmm9);
    movdqu(Address(rsp,off++*16), xmm10);
    movdqu(Address(rsp,off++*16), xmm11);
    movdqu(Address(rsp,off++*16), xmm12);
    movdqu(Address(rsp,off++*16), xmm13);
    movdqu(Address(rsp,off++*16), xmm14);
    movdqu(Address(rsp,off++*16), xmm15);
#endif
  }

  // Preserve registers across runtime call
  int incoming_argument_and_return_value_offset = -1;
  if (num_fpu_regs_in_use > 1) {
    // Must preserve all other FPU regs (could alternatively convert
    // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
    // FPU state, but can not trust C compiler)
    NEEDS_CLEANUP;
    // NOTE that in this case we also push the incoming argument(s) to
    // the stack and restore it later; we also use this stack slot to
    // hold the return value from dsin, dcos etc.
    for (int i = 0; i < num_fpu_regs_in_use; i++) {
      subptr(rsp, sizeof(jdouble));
      fstp_d(Address(rsp, 0));
    }
    incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
    for (int i = nb_args-1; i >= 0; i--) {
      fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
    }
  }

  subptr(rsp, nb_args*sizeof(jdouble));
  for (int i = 0; i < nb_args; i++) {
    fstp_d(Address(rsp, i*sizeof(jdouble)));
  }

#ifdef _LP64
  if (nb_args > 0) {
    movdbl(xmm0, Address(rsp, 0));
  }
  if (nb_args > 1) {
    movdbl(xmm1, Address(rsp, sizeof(jdouble)));
  }
  assert(nb_args <= 2, "unsupported number of args");
#endif // _LP64

  // NOTE: we must not use call_VM_leaf here because that requires a
  // complete interpreter frame in debug mode -- same bug as 4387334
  // MacroAssembler::call_VM_leaf_base is perfectly safe and will
  // do proper 64bit abi

  NEEDS_CLEANUP;
  // Need to add stack banging before this runtime call if it needs to
  // be taken; however, there is no generic stack banging routine at
  // the MacroAssembler level

  MacroAssembler::call_VM_leaf_base(runtime_entry, 0);

#ifdef _LP64
  movsd(Address(rsp, 0), xmm0);
  fld_d(Address(rsp, 0));
#endif // _LP64
  addptr(rsp, sizeof(jdouble) * nb_args);
  if (num_fpu_regs_in_use > 1) {
    // Must save return value to stack and then restore entire FPU
    // stack except incoming arguments
    fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
    for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
      fld_d(Address(rsp, 0));
      addptr(rsp, sizeof(jdouble));
    }
    fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
    addptr(rsp, sizeof(jdouble) * nb_args);
  }

  off = 0;
  if (UseSSE == 1)  {
    movflt(xmm0, Address(rsp,off++*sizeof(jdouble)));
    movflt(xmm1, Address(rsp,off++*sizeof(jdouble)));
    movflt(xmm2, Address(rsp,off++*sizeof(jdouble)));
    movflt(xmm3, Address(rsp,off++*sizeof(jdouble)));
    movflt(xmm4, Address(rsp,off++*sizeof(jdouble)));
    movflt(xmm5, Address(rsp,off++*sizeof(jdouble)));
    movflt(xmm6, Address(rsp,off++*sizeof(jdouble)));
    movflt(xmm7, Address(rsp,off++*sizeof(jdouble)));
    addptr(rsp, sizeof(jdouble)*8);
  } else if (UseSSE >= 2)  {
    // Restore whole 128bit (16 bytes) XMM registers
    movdqu(xmm0, Address(rsp,off++*16));
    movdqu(xmm1, Address(rsp,off++*16));
    movdqu(xmm2, Address(rsp,off++*16));
    movdqu(xmm3, Address(rsp,off++*16));
    movdqu(xmm4, Address(rsp,off++*16));
    movdqu(xmm5, Address(rsp,off++*16));
    movdqu(xmm6, Address(rsp,off++*16));
    movdqu(xmm7, Address(rsp,off++*16));
#ifdef _LP64
    movdqu(xmm8,  Address(rsp,off++*16));
    movdqu(xmm9,  Address(rsp,off++*16));
    movdqu(xmm10, Address(rsp,off++*16));
    movdqu(xmm11, Address(rsp,off++*16));
    movdqu(xmm12, Address(rsp,off++*16));
    movdqu(xmm13, Address(rsp,off++*16));
    movdqu(xmm14, Address(rsp,off++*16));
    movdqu(xmm15, Address(rsp,off++*16));
#endif
    addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
#ifdef COMPILER2
    if (MaxVectorSize > 16) {
      // Restore upper half of YMM registers.
      vinsertf128h(xmm0, Address(rsp,  0));
      vinsertf128h(xmm1, Address(rsp, 16));
      vinsertf128h(xmm2, Address(rsp, 32));
      vinsertf128h(xmm3, Address(rsp, 48));
      vinsertf128h(xmm4, Address(rsp, 64));
      vinsertf128h(xmm5, Address(rsp, 80));
      vinsertf128h(xmm6, Address(rsp, 96));
      vinsertf128h(xmm7, Address(rsp,112));
#ifdef _LP64
      vinsertf128h(xmm8,  Address(rsp,128));
      vinsertf128h(xmm9,  Address(rsp,144));
      vinsertf128h(xmm10, Address(rsp,160));
      vinsertf128h(xmm11, Address(rsp,176));
      vinsertf128h(xmm12, Address(rsp,192));
      vinsertf128h(xmm13, Address(rsp,208));
      vinsertf128h(xmm14, Address(rsp,224));
      vinsertf128h(xmm15, Address(rsp,240));
#endif
      addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
    }
#endif
  }
  popa();
}

static const double pi_4 = 0.7853981633974483;

void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
  // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
  // was attempted in this code; unfortunately it appears that the
  // switch to 80-bit precision and back causes this to be
  // unprofitable compared with simply performing a runtime call if
  // the argument is out of the (-pi/4, pi/4) range.

  Register tmp = noreg;
  if (!VM_Version::supports_cmov()) {
    // fcmp needs a temporary, so preserve rbx
    tmp = rbx;
    push(tmp);
  }

  Label slow_case, done;

  ExternalAddress pi4_adr = (address)&pi_4;
  if (reachable(pi4_adr)) {
    // x ?<= pi/4
    fld_d(pi4_adr);
    fld_s(1);                // Stack:  X  PI/4  X
    fabs();                  // Stack: |X| PI/4  X
    fcmp(tmp);
    jcc(Assembler::above, slow_case);

    // fastest case: -pi/4 <= x <= pi/4
    switch(trig) {
    case 's':
      fsin();
      break;
    case 'c':
      fcos();
      break;
    case 't':
      ftan();
      break;
    default:
      assert(false, "bad intrinsic");
      break;
    }
    jmp(done);
  }

  // slow case: runtime call
  bind(slow_case);

  switch(trig) {
  case 's':
    {
      fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
    }
    break;
  case 'c':
    {
      fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
    }
    break;
  case 't':
    {
      fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
    }
    break;
  default:
    assert(false, "bad intrinsic");
    break;
  }

  // Come here with result in F-TOS
  bind(done);

  if (tmp != noreg) {
    pop(tmp);
  }
}

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface) {
  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size() * wordSize;
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  if (HeapWordsPerLong > 1) {
    // Round up to align_object_offset boundary
    // see code for InstanceKlass::start_of_itable!
    round_to(scan_temp, BytesPerLong);
  }

  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
    cmpptr(intf_klass, method_result);

    if (peel) {
      jccb(Assembler::equal, found_method);
    } else {
      jccb(Assembler::notEqual, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel)  break;

    bind(search);

    // Check that the previous entry is non-null.  A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    testptr(method_result, method_result);
    jcc(Assembler::zero, L_no_such_interface);
    addptr(scan_temp, scan_step);
  }

  bind(found_method);

  // Got a hit.
  movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
  movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
}

// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
  Address vtable_entry_addr(recv_klass,
                            vtable_index, Address::times_ptr,
                            base + vtableEntry::method_offset_in_bytes());
  movptr(method_result, vtable_entry_addr);
}

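// For contrast with the itable scan above, virtual dispatch is a single
// indexed load. In pseudo-C (illustrative field names):
//
//   Method* m = recv_klass->vtable[vtable_index];   // lookup_virtual_method
//
// whereas the interface case must first scan the itableOffsetEntries to
// find the block belonging to intf_klass, then index into it -- which is
// why megamorphic invokeinterface is the slower call shape.
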
void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}

void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jcc, which "knows" that L_fallthrough, at least, is in
  // range of a jccb.  If this routine grows larger, reconsider at
  // least some of these.
#define local_jcc(assembler_cond, label)                                \
  if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
  else                             jcc( assembler_cond, label) /*omit semi*/

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            jmp(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface.  Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmpptr(sub_klass, super_klass);
  local_jcc(Assembler::equal, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    // Positive movl does right thing on LP64.
    movl(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
  cmpptr(super_klass, super_check_addr); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    local_jcc(Assembler::equal, *L_success);
    cmpl(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_slow_path);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef local_jcc
#undef final_jmp
}

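// Big picture for the fast path above (an explanatory sketch; the scheme
// follows HotSpot's fast subtype checking design): each Klass stores a
// super_check_offset. In pseudo-C:
//
//   if (sub == super) return success;                        // trivial hit
//   juint off = super->super_check_offset;
//   if (*(Klass**)((address)sub + off) == super) return success;
//   if (off != secondary_super_cache_offset) return failure; // primary miss
//   goto slow_path;   // scan sub->secondary_supers, caching on a hit
//
// The aliasing trick: for a primary super, off points into the fixed-depth
// display; for a secondary super it points at the one-element super cache,
// so the single cmpptr above serves both cases.
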
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
  assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)

  // Get super_klass value into rax (even if it was in rdi or rcx).
  bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
  if (super_klass != rax || UseCompressedOops) {
    if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
    mov(rax, super_klass);
  }
  if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
  if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }

#ifndef PRODUCT
  int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
  ExternalAddress pst_counter_addr((address) pst_counter);
  NOT_LP64(  incrementl(pst_counter_addr) );
  LP64_ONLY( lea(rcx, pst_counter_addr) );
  LP64_ONLY( incrementl(Address(rcx, 0)) );
#endif //PRODUCT

  // We will consult the secondary-super array.
  movptr(rdi, secondary_supers_addr);
  // Load the array length.  (Positive movl does right thing on LP64.)
  movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  addptr(rdi, Array<Klass*>::base_offset_in_bytes());

  // Scan RCX words at [RDI] for an occurrence of RAX.
  // Set NZ/Z based on last compare.
  // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
  // not change flags (only scas instruction which is repeated sets flags).
  // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.

  testptr(rax, rax); // Set Z = 0
  repne_scan();

  // Unspill the temp. registers:
  if (pushed_rdi)  pop(rdi);
  if (pushed_rcx)  pop(rcx);
  if (pushed_rax)  pop(rax);

  if (set_cond_codes) {
    // Special hack for the AD files:  rdi is guaranteed non-zero.
    assert(!pushed_rdi, "rdi must be left non-NULL");
    // Also, the condition codes are properly set Z/NZ on succeed/failure.
  }

  if (L_failure == &L_fallthrough)
        jccb(Assembler::notEqual, *L_failure);
  else  jcc(Assembler::notEqual, *L_failure);

  // Success.  Cache the super we found and proceed in triumph.
  movptr(super_cache_addr, super_klass);

  if (L_success != &L_fallthrough) {
    jmp(*L_success);
|
4250 |
} |
|
4251 |
||
4252 |
#undef IS_A_TEMP |
|
4253 |
||
4254 |
bind(L_fallthrough); |
|
4255 |
} |
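
// The repne_scan() above behaves like this C loop over word-sized array
// elements (a sketch; the flag updates come from the repeated scas, as the
// comment above explains):
//
//   while (rcx != 0) {
//     rcx--;
//     ZF = (*(intptr_t*)rdi == rax);   // scas: compare and set flags
//     rdi += wordSize;                 // scas: post-increment
//     if (ZF) break;                   // 'repne' stops on a match
//   }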


void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
  if (VM_Version::supports_cmov()) {
    cmovl(cc, dst, src);
  } else {
    Label L;
    jccb(negate_condition(cc), L);
    movl(dst, src);
    bind(L);
  }
}

void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
  if (VM_Version::supports_cmov()) {
    cmovl(cc, dst, src);
  } else {
    Label L;
    jccb(negate_condition(cc), L);
    movl(dst, src);
    bind(L);
  }
}
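
// Both cmov32 variants compute the same thing, the C equivalent of
//
//   if (cc) dst = src;
//
// using a branch-free cmovl where the CPU supports CMOV, and a short
// conditional skip around a plain movl everywhere else.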

void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s", reg->name(), s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop {");
#ifdef _LP64
  push(rscratch1);                    // save r10, trashed by movptr()
#endif
  push(rax);                          // save rax,
  push(reg);                          // pass register argument
  ExternalAddress buffer((address) b);
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  movptr(rax, buffer.addr());
  push(rax);
  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments (oop, message) and restores rax, r10
  BLOCK_COMMENT("} verify_oop");
}
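
// Typical use, as seen throughout this file (the check is a no-op unless
// -XX:+VerifyOops is set):
//
//   verify_oop(rax, "broken oop in decode_heap_oop");
//
// The tag string is combined with the register name and baked into the code
// blob via code_string(), so the message survives as long as the code does.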


RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0)
    return RegisterOrConstant(value + offset);

  // load indirectly to solve generation ordering problem
  movptr(tmp, ExternalAddress((address) delayed_value_addr));

#ifdef ASSERT
  { Label L;
    testptr(tmp, tmp);
    if (WizardMode) {
      const char* buf = NULL;
      {
        ResourceMark rm;
        stringStream ss;
        ss.print("DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
        buf = code_string(ss.as_string());
      }
      jcc(Assembler::notZero, L);
      STOP(buf);
    } else {
      jccb(Assembler::notZero, L);
      hlt();
    }
    bind(L);
  }
#endif

  if (offset != 0)
    addptr(tmp, offset);

  return RegisterOrConstant(tmp);
}


Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  Register             scale_reg    = noreg;
  Address::ScaleFactor scale_factor = Address::no_scale;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
  } else {
    scale_reg    = arg_slot.as_register();
    scale_factor = Address::times(stackElementSize);
  }
  offset += wordSize;           // return PC is on stack
  return Address(rsp, scale_reg, scale_factor, offset);
}
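
// Worked example (a sketch, assuming a constant slot): for arg_slot == 2 and
// extra_slot_offset == 0 the result is
//
//   Address(rsp, noreg, Address::no_scale,
//           Interpreter::expr_offset_in_bytes(0) + 2 * stackElementSize + wordSize)
//
// i.e. the third expression-stack slot, skipping the return PC on top.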


void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops) return;

  // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop_addr: %s", s);
    b = code_string(ss.as_string());
  }
#ifdef _LP64
  push(rscratch1);                    // save r10, trashed by movptr()
#endif
  push(rax);                          // save rax,
  // addr may contain rsp so we will have to adjust it based on the push
  // we just did (and on 64 bit we do two pushes)
  // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
  // stores rax into addr which is backwards of what was intended.
  if (addr.uses(rsp)) {
    lea(rax, addr);
    pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
  } else {
    pushptr(addr);
  }

  ExternalAddress buffer((address) b);
  // pass msg argument
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  movptr(rax, buffer.addr());
  push(rax);

  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments (addr, message) and restores rax, r10.
}

void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;
    Register t1 = rsi;
    Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);

    push(t1);
    NOT_LP64(push(thread_reg));
    NOT_LP64(get_thread(thread_reg));

    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
    jcc(Assembler::aboveEqual, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    jcc(Assembler::aboveEqual, ok);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    NOT_LP64(pop(thread_reg));
    pop(t1);
  }
#endif
}

class ControlWord {
 public:
  int32_t _value;

  int  rounding_control() const        { return  (_value >> 10) & 3      ; }
  int  precision_control() const       { return  (_value >>  8) & 3      ; }
  bool precision() const               { return ((_value >>  5) & 1) != 0; }
  bool underflow() const               { return ((_value >>  4) & 1) != 0; }
  bool overflow() const                { return ((_value >>  3) & 1) != 0; }
  bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
  bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
  bool invalid() const                 { return ((_value >>  0) & 1) != 0; }

  void print() const {
    // rounding control
    const char* rc;
    switch (rounding_control()) {
      case 0: rc = "round near"; break;
      case 1: rc = "round down"; break;
      case 2: rc = "round up  "; break;
      case 3: rc = "chop      "; break;
    };
    // precision control
    const char* pc;
    switch (precision_control()) {
      case 0: pc = "24 bits "; break;
      case 1: pc = "reserved"; break;
      case 2: pc = "53 bits "; break;
      case 3: pc = "64 bits "; break;
    };
    // flags
    char f[9];
    f[0] = ' ';
    f[1] = ' ';
    f[2] = (precision   ()) ? 'P' : 'p';
    f[3] = (underflow   ()) ? 'U' : 'u';
    f[4] = (overflow    ()) ? 'O' : 'o';
    f[5] = (zero_divide ()) ? 'Z' : 'z';
    f[6] = (denormalized()) ? 'D' : 'd';
    f[7] = (invalid     ()) ? 'I' : 'i';
    f[8] = '\x0';
    // output
    printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
  }

};

class StatusWord {
 public:
  int32_t _value;

  bool busy() const                    { return ((_value >> 15) & 1) != 0; }
  bool C3() const                      { return ((_value >> 14) & 1) != 0; }
  bool C2() const                      { return ((_value >> 10) & 1) != 0; }
  bool C1() const                      { return ((_value >>  9) & 1) != 0; }
  bool C0() const                      { return ((_value >>  8) & 1) != 0; }
  int  top() const                     { return  (_value >> 11) & 7      ; }
  bool error_status() const            { return ((_value >>  7) & 1) != 0; }
  bool stack_fault() const             { return ((_value >>  6) & 1) != 0; }
  bool precision() const               { return ((_value >>  5) & 1) != 0; }
  bool underflow() const               { return ((_value >>  4) & 1) != 0; }
  bool overflow() const                { return ((_value >>  3) & 1) != 0; }
  bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
  bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
  bool invalid() const                 { return ((_value >>  0) & 1) != 0; }

  void print() const {
    // condition codes
    char c[5];
    c[0] = (C3()) ? '3' : '-';
    c[1] = (C2()) ? '2' : '-';
    c[2] = (C1()) ? '1' : '-';
    c[3] = (C0()) ? '0' : '-';
    c[4] = '\x0';
    // flags
    char f[9];
    f[0] = (error_status()) ? 'E' : '-';
    f[1] = (stack_fault ()) ? 'S' : '-';
    f[2] = (precision   ()) ? 'P' : '-';
    f[3] = (underflow   ()) ? 'U' : '-';
    f[4] = (overflow    ()) ? 'O' : '-';
    f[5] = (zero_divide ()) ? 'Z' : '-';
    f[6] = (denormalized()) ? 'D' : '-';
    f[7] = (invalid     ()) ? 'I' : '-';
    f[8] = '\x0';
    // output
    printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
  }

};

class TagWord {
 public:
  int32_t _value;

  int tag_at(int i) const              { return (_value >> (i*2)) & 3; }

  void print() const {
    printf("%04x", _value & 0xFFFF);
  }

};

class FPU_Register {
 public:
  int32_t _m0;
  int32_t _m1;
  int16_t _ex;

  bool is_indefinite() const           {
    return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
  }

  void print() const {
    char  sign = (_ex < 0) ? '-' : '+';
    const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
    printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
  };

};

class FPU_State {
 public:
  enum {
    register_size       = 10,
    number_of_registers =  8,
    register_mask       =  7
  };

  ControlWord  _control_word;
  StatusWord   _status_word;
  TagWord      _tag_word;
  int32_t      _error_offset;
  int32_t      _error_selector;
  int32_t      _data_offset;
  int32_t      _data_selector;
  int8_t       _register[register_size * number_of_registers];

  int tag_for_st(int i) const          { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
  FPU_Register* st(int i) const        { return (FPU_Register*)&_register[register_size * i]; }

  const char* tag_as_string(int tag) const {
    switch (tag) {
      case 0: return "valid";
      case 1: return "zero";
      case 2: return "special";
      case 3: return "empty";
    }
    ShouldNotReachHere();
    return NULL;
  }

  void print() const {
    // print computation registers
    { int t = _status_word.top();
      for (int i = 0; i < number_of_registers; i++) {
        int j = (i - t) & register_mask;
        printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
        st(j)->print();
        printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
      }
    }
    printf("\n");
    // print control registers
    printf("ctrl = "); _control_word.print(); printf("\n");
    printf("stat = "); _status_word .print(); printf("\n");
    printf("tags = "); _tag_word    .print(); printf("\n");
  }

};
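
// Illustration of the ST(i) mapping above (hypothetical values): if
// _status_word.top() == 5, then logical ST(0) lives in physical register r5,
// so
//
//   tag_for_st(0) == _tag_word.tag_at((5 + 0) & register_mask)   // tag of r5
//
// and print() marks r5 with '*' because (5 - top()) & register_mask == 0.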

class Flag_Register {
 public:
  int32_t _value;

  bool overflow() const                { return ((_value >> 11) & 1) != 0; }
  bool direction() const               { return ((_value >> 10) & 1) != 0; }
  bool sign() const                    { return ((_value >>  7) & 1) != 0; }
  bool zero() const                    { return ((_value >>  6) & 1) != 0; }
  bool auxiliary_carry() const         { return ((_value >>  4) & 1) != 0; }
  bool parity() const                  { return ((_value >>  2) & 1) != 0; }
  bool carry() const                   { return ((_value >>  0) & 1) != 0; }

  void print() const {
    // flags
    char f[8];
    f[0] = (overflow       ()) ? 'O' : '-';
    f[1] = (direction      ()) ? 'D' : '-';
    f[2] = (sign           ()) ? 'S' : '-';
    f[3] = (zero           ()) ? 'Z' : '-';
    f[4] = (auxiliary_carry()) ? 'A' : '-';
    f[5] = (parity         ()) ? 'P' : '-';
    f[6] = (carry          ()) ? 'C' : '-';
    f[7] = '\x0';
    // output
    printf("%08x flags = %s", _value, f);
  }

};

class IU_Register {
 public:
  int32_t _value;

  void print() const {
    printf("%08x %11d", _value, _value);
  }

};

class IU_State {
 public:
  Flag_Register _eflags;
  IU_Register   _rdi;
  IU_Register   _rsi;
  IU_Register   _rbp;
  IU_Register   _rsp;
  IU_Register   _rbx;
  IU_Register   _rdx;
  IU_Register   _rcx;
  IU_Register   _rax;

  void print() const {
    // computation registers
    printf("rax, = "); _rax.print(); printf("\n");
    printf("rbx, = "); _rbx.print(); printf("\n");
    printf("rcx  = "); _rcx.print(); printf("\n");
    printf("rdx  = "); _rdx.print(); printf("\n");
    printf("rdi  = "); _rdi.print(); printf("\n");
    printf("rsi  = "); _rsi.print(); printf("\n");
    printf("rbp, = "); _rbp.print(); printf("\n");
    printf("rsp  = "); _rsp.print(); printf("\n");
    printf("\n");
    // control registers
    printf("flgs = "); _eflags.print(); printf("\n");
  }
};


class CPU_State {
 public:
  FPU_State _fpu_state;
  IU_State  _iu_state;

  void print() const {
    printf("--------------------------------------------------\n");
    _iu_state .print();
    printf("\n");
    _fpu_state.print();
    printf("--------------------------------------------------\n");
  }

};


static void _print_CPU_state(CPU_State* state) {
  state->print();
};


void MacroAssembler::print_CPU_state() {
  push_CPU_state();
  push(rsp);                // pass CPU state
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
  addptr(rsp, wordSize);    // discard argument
  pop_CPU_state();
}


static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
  static int counter = 0;
  FPU_State* fs = &state->_fpu_state;
  counter++;
  // For leaf calls, only verify that the top few elements remain empty.
  // We only need 1 empty at the top for C2 code.
  if( stack_depth < 0 ) {
    if( fs->tag_for_st(7) != 3 ) {
      printf("FPR7 not empty\n");
      state->print();
      assert(false, "error");
      return false;
    }
    return true;                // All other stack states do not matter
  }

  assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
         "bad FPU control word");

  // compute stack depth
  int i = 0;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i)  < 3) i++;
  int d = i;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
  // verify findings
  if (i != FPU_State::number_of_registers) {
    // stack not contiguous
    printf("%s: stack not contiguous at ST%d\n", s, i);
    state->print();
    assert(false, "error");
    return false;
  }
  // check if computed stack depth corresponds to expected stack depth
  if (stack_depth < 0) {
    // expected stack depth is -stack_depth or less
    if (d > -stack_depth) {
      // too many elements on the stack
      printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  } else {
    // expected stack depth is stack_depth
    if (d != stack_depth) {
      // wrong stack depth
      printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  }
  // everything is cool
  return true;
}
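
// Convention used by _verify_FPU above: a negative 'stack_depth' is the
// leaf-call case and returns early after checking only that ST(7) is empty
// (tag 3); a non-negative value demands exactly that many contiguous live
// registers starting at ST(0).  For example, the method prolog further below
// uses
//
//   verify_FPU(0, "FPU stack must be clean on entry");
//
// to assert a completely empty x87 stack.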


void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  if (!VerifyFPU) return;
  push_CPU_state();
  push(rsp);                // pass CPU state
  ExternalAddress msg((address) s);
  // pass message string s
  pushptr(msg.addr());
  push(stack_depth);        // pass stack depth
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
  addptr(rsp, 3 * wordSize);   // discard arguments
  // check for error
  { Label L;
    testl(rax, rax);
    jcc(Assembler::notZero, L);
    int3();                  // break if error condition
    bind(L);
  }
  pop_CPU_state();
}

void MacroAssembler::restore_cpu_control_state_after_jni() {
  // Either restore the MXCSR register after returning from the JNI Call
  // or verify that it wasn't changed (with -Xcheck:jni flag).
  if (VM_Version::supports_sse()) {
    if (RestoreMXCSROnJNICalls) {
      ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
    } else if (CheckJNICalls) {
      call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
    }
  }
  if (VM_Version::supports_avx()) {
    // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
    vzeroupper();
  }

#ifndef _LP64
  // Either restore the x87 floating-point control word after returning
  // from the JNI call or verify that it wasn't changed.
  if (CheckJNICalls) {
    call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
  }
#endif // _LP64
}

void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedClassPointers) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst);
  } else
#endif
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}

void MacroAssembler::load_prototype_header(Register dst, Register src) {
  load_klass(dst, src);
  movptr(dst, Address(dst, Klass::prototype_header_offset()));
}

void MacroAssembler::store_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  } else
#endif
    movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}

void MacroAssembler::load_heap_oop(Register dst, Address src) {
#ifdef _LP64
  // FIXME: Must change all places where we try to load the klass.
  if (UseCompressedOops) {
    movl(dst, src);
    decode_heap_oop(dst);
  } else
#endif
    movptr(dst, src);
}

// Doesn't do verification, generates fixed size code
void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
#ifdef _LP64
  if (UseCompressedOops) {
    movl(dst, src);
    decode_heap_oop_not_null(dst);
  } else
#endif
    movptr(dst, src);
}

void MacroAssembler::store_heap_oop(Address dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    assert(!dst.uses(src), "not enough registers");
    encode_heap_oop(src);
    movl(dst, src);
  } else
#endif
    movptr(dst, src);
}

void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
  assert_different_registers(src1, tmp);
#ifdef _LP64
  if (UseCompressedOops) {
    bool did_push = false;
    if (tmp == noreg) {
      tmp = rax;
      push(tmp);
      did_push = true;
      assert(!src2.uses(rsp), "can't push");
    }
    load_heap_oop(tmp, src2);
    cmpptr(src1, tmp);
    if (did_push)  pop(tmp);
  } else
#endif
    cmpptr(src1, src2);
}

// Used for storing NULLs.
void MacroAssembler::store_heap_oop_null(Address dst) {
#ifdef _LP64
  if (UseCompressedOops) {
    movl(dst, (int32_t)NULL_WORD);
  } else {
    movslq(dst, (int32_t)NULL_WORD);
  }
#else
  movl(dst, (int32_t)NULL_WORD);
#endif
}

#ifdef _LP64
void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    // Store to klass gap in destination
    movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
  }
}

#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  if (CheckCompressedOops) {
    Label ok;
    push(rscratch1); // cmpptr trashes rscratch1
    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
    jcc(Assembler::equal, ok);
    STOP(msg);
    bind(ok);
    pop(rscratch1);
  }
}
#endif

// Algorithm must match oop.inline.hpp encode_heap_oop.
void MacroAssembler::encode_heap_oop(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop(r, "broken oop in encode_heap_oop");
  if (Universe::narrow_oop_base() == NULL) {
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      shrq(r, LogMinObjAlignmentInBytes);
    }
    return;
  }
  testq(r, r);
  cmovq(Assembler::equal, r, r12_heapbase);
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}
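
// In plain C terms the encoding above computes (a sketch; 'base' and 'shift'
// stand for Universe::narrow_oop_base() and Universe::narrow_oop_shift()):
//
//   narrow = (oop == NULL) ? 0 : (uint32_t)((oop - base) >> shift);
//
// The testq/cmovq pair handles the NULL case without a branch: a NULL r is
// first replaced by 'base' itself so that the subtraction yields zero.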

void MacroAssembler::encode_heap_oop_not_null(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    testq(r, r);
    jcc(Assembler::notEqual, ok);
    STOP("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop_not_null");
  if (Universe::narrow_oop_base() != NULL) {
    subq(r, r12_heapbase);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    shrq(r, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    testq(src, src);
    jcc(Assembler::notEqual, ok);
    STOP("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop(src, "broken oop in encode_heap_oop_not_null2");
  if (dst != src) {
    movq(dst, src);
  }
  if (Universe::narrow_oop_base() != NULL) {
    subq(dst, r12_heapbase);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    shrq(dst, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::decode_heap_oop(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
#endif
  if (Universe::narrow_oop_base() == NULL) {
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      shlq(r, LogMinObjAlignmentInBytes);
    }
  } else {
    Label done;
    shlq(r, LogMinObjAlignmentInBytes);
    jccb(Assembler::equal, done);
    addq(r, r12_heapbase);
    bind(done);
  }
  verify_oop(r, "broken oop in decode_heap_oop");
}

void MacroAssembler::decode_heap_oop_not_null(Register r) {
  // Note: it will change flags
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (Universe::narrow_oop_shift() != 0) {
    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    shlq(r, LogMinObjAlignmentInBytes);
    if (Universe::narrow_oop_base() != NULL) {
      addq(r, r12_heapbase);
    }
  } else {
    assert (Universe::narrow_oop_base() == NULL, "sanity");
  }
}

void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  // Note: it will change flags
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (Universe::narrow_oop_shift() != 0) {
    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    if (LogMinObjAlignmentInBytes == Address::times_8) {
      leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
    } else {
      if (dst != src) {
        movq(dst, src);
      }
      shlq(dst, LogMinObjAlignmentInBytes);
      if (Universe::narrow_oop_base() != NULL) {
        addq(dst, r12_heapbase);
      }
    }
  } else {
    assert (Universe::narrow_oop_base() == NULL, "sanity");
    if (dst != src) {
      movq(dst, src);
    }
  }
}
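
// When the shift equals Address::times_8, the register-to-register decode
// above collapses into a single instruction:
//
//   leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));  // dst = base + (src << 3)
//
// which is why that case is special-cased instead of emitting mov/shl/add.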

void MacroAssembler::encode_klass_not_null(Register r) {
  if (Universe::narrow_klass_base() != NULL) {
    // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
    assert(r != r12_heapbase, "Encoding a klass in r12");
    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
    subq(r, r12_heapbase);
  }
  if (Universe::narrow_klass_shift() != 0) {
    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
    shrq(r, LogKlassAlignmentInBytes);
  }
  if (Universe::narrow_klass_base() != NULL) {
    reinit_heapbase();
  }
}
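
// Unlike compressed oops, the klass base is not kept live in a register, so
// the sequence above borrows r12 for the duration of the encode and then
// calls reinit_heapbase() to restore the oop base.  The net effect, in C
// terms (a sketch):
//
//   narrow_klass = (uint32_t)((klass - Universe::narrow_klass_base())
//                             >> Universe::narrow_klass_shift());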

void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
  if (dst == src) {
    encode_klass_not_null(src);
  } else {
    if (Universe::narrow_klass_base() != NULL) {
      mov64(dst, (int64_t)Universe::narrow_klass_base());
      negq(dst);
      addq(dst, src);
    } else {
      movptr(dst, src);
    }
    if (Universe::narrow_klass_shift() != 0) {
      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
      shrq(dst, LogKlassAlignmentInBytes);
    }
  }
}

// Function instr_size_for_decode_klass_not_null() counts the instructions
// generated by decode_klass_not_null(register r) and reinit_heapbase(),
// when (Universe::heap() != NULL).  Hence, if the instructions they
// generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
  if (Universe::narrow_klass_base() != NULL) {
    // mov64 + addq + shlq? + mov64 (for reinit_heapbase()).
    return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
  } else {
    // longest load decode klass function, mov64, leaq
    return 16;
  }
}

// !!! If the instructions that get generated here change then function
// instr_size_for_decode_klass_not_null() needs to get updated.
void MacroAssembler::decode_klass_not_null(Register r) {
  // Note: it will change flags
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert(r != r12_heapbase, "Decoding a klass in r12");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (Universe::narrow_klass_shift() != 0) {
    assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
    shlq(r, LogKlassAlignmentInBytes);
  }
  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
  if (Universe::narrow_klass_base() != NULL) {
    mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
    addq(r, r12_heapbase);
    reinit_heapbase();
  }
}
||
5125 |
void MacroAssembler::decode_klass_not_null(Register dst, Register src) { |
|
5126 |
// Note: it will change flags |
|
19979
ebe1dbb6e1aa
8015107: NPG: Use consistent naming for metaspace concepts
ehelin
parents:
19319
diff
changeset
|
5127 |
assert (UseCompressedClassPointers, "should only be used for compressed headers"); |
19319
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5128 |
if (dst == src) { |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5129 |
decode_klass_not_null(dst); |
14626 | 5130 |
} else { |
19319
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5131 |
// Cannot assert, unverified entry point counts instructions (see .ad file) |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5132 |
// vtableStubs also counts instructions in pd_code_size_limit. |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5133 |
// Also do not verify_oop as this is called by verify_oop. |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5134 |
mov64(dst, (int64_t)Universe::narrow_klass_base()); |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5135 |
if (Universe::narrow_klass_shift() != 0) { |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5136 |
assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5137 |
assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5138 |
leaq(dst, Address(dst, src, Address::times_8, 0)); |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5139 |
} else { |
0ad35be0733a
8003424: Enable Class Data Sharing for CompressedOops
hseigel
parents:
18507
diff
changeset
|
5140 |
addq(dst, src); |
14626 | 5141 |
} |
5142 |
} |
|
5143 |
} |

void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
}

void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
}

void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
}

void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
}
||

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops || UseCompressedClassPointers) {
    if (Universe::heap() != NULL) {
      if (Universe::narrow_oop_base() == NULL) {
        MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
      } else {
        mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
      }
    } else {
      movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
    }
  }
}
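
// Illustrative note (an assumption, not part of the original source):
// r12_heapbase holds the base used when decoding compressed oops. With a
// NULL base the decode degenerates to a shift, roughly
//   oop = (address)((uintptr_t)narrow_oop << LogMinObjAlignmentInBytes);
// otherwise it is base-relative,
//   oop = r12_heapbase + ((uintptr_t)narrow_oop << LogMinObjAlignmentInBytes);
// which is why the register must be re-materialized here after anything
// that may have clobbered it.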

#endif // _LP64

// C2 compiled method's prolog code.
void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {

  // WARNING: Initial instruction MUST be 5 bytes or longer so that
  // NativeJump::patch_verified_entry will be able to patch out the entry
  // code safely. The push to verify stack depth is ok at 5 bytes,
  // the frame allocation can be either 3 or 6 bytes. So if we don't do
  // stack bang then we must use the 6 byte frame allocation even if
  // we have no frame. :-(

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  // Remove word for return addr
  framesize -= wordSize;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (stack_bang) {
    generate_stack_overflow_check(framesize);

    // We always push rbp so that on return to the interpreter rbp will be
    // restored correctly and we can correct the stack.
    push(rbp);
    // Remove word for ebp
    framesize -= wordSize;

    // Create frame
    if (framesize) {
      subptr(rsp, framesize);
    }
  } else {
    // Create frame (force generation of a 4 byte immediate value)
    subptr_imm32(rsp, framesize);

    // Save RBP register now.
    framesize -= wordSize;
    movptr(Address(rsp, framesize), rbp);
  }

  if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
    framesize -= wordSize;
    movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
  }

#ifndef _LP64
  // If method sets FPU control word do it now
  if (fp_mode_24b) {
    fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
  }
  if (UseSSE >= 2 && VerifyFPU) {
    verify_FPU(0, "FPU stack must be clean on entry");
  }
#endif

#ifdef ASSERT
  if (VerifyStackAtCalls) {
    Label L;
    push(rax);
    mov(rax, rsp);
    andptr(rax, StackAlignmentInBytes-1);
    cmpptr(rax, StackAlignmentInBytes-wordSize);
    pop(rax);
    jcc(Assembler::equal, L);
    STOP("Stack is not properly aligned!");
    bind(L);
  }
#endif

}
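
// Illustrative sketch (an assumption, not part of the original source): after
// the prolog above the frame looks like
//
//   [ return address ]  <- rsp on entry
//   [ saved rbp      ]
//   [ 0xbadb100d     ]  (only with VerifyStackAtCalls)
//   [ spill area ... ]
//   [                ]  <- rsp, StackAlignmentInBytes aligned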

void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp) {
  // cnt - number of qwords (8-byte words).
  // base - start address, qword aligned.
  assert(base==rdi, "base register must be edi for rep stos");
  assert(tmp==rax, "tmp register must be eax for rep stos");
  assert(cnt==rcx, "cnt register must be ecx for rep stos");

  xorptr(tmp, tmp);
  if (UseFastStosb) {
    shlptr(cnt,3); // convert to number of bytes
    rep_stosb();
  } else {
    NOT_LP64(shlptr(cnt,1);) // convert to number of dwords for 32-bit VM
    rep_stos();
  }
}
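
// Rough C equivalent of the rep-stos path above (a sketch, not part of the
// original source): with rax cleared, "rep stosq" behaves like
//   while (rcx-- != 0) { *(uint64_t*)rdi = 0; rdi += 8; }
// and the UseFastStosb variant stores the same zeros one byte at a time,
// which CPUs with fast-string support microcode into wide stores.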

// IndexOf for constant substrings with size >= 8 chars
// which don't need to be loaded through stack.
void MacroAssembler::string_indexofC8(Register str1, Register str2,
                                      Register cnt1, Register cnt2,
                                      int int_cnt2,  Register result,
                                      XMMRegister vec, Register tmp) {
  ShortBranchVerifier sbv(this);
  assert(UseSSE42Intrinsics, "SSE4.2 is required");

  // This method uses the pcmpestri instruction with bound registers
  // inputs:
  //   xmm - substring
  //   rax - substring length (elements count)
  //   mem - scanned string
  //   rdx - string length (elements count)
  //   0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
  // outputs:
  //   rcx - matched index in string
  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");

  Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
        RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
        MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;

  // Note, inline_string_indexOf() generates checks:
  // if (substr.count > string.count) return -1;
  // if (substr.count == 0) return 0;
  assert(int_cnt2 >= 8, "this code is used only for cnt2 >= 8 chars");

  // Load substring.
  movdqu(vec, Address(str2, 0));
  movl(cnt2, int_cnt2);
  movptr(result, str1); // string addr

  if (int_cnt2 > 8) {
    jmpb(SCAN_TO_SUBSTR);

    // Reload substr for rescan, this code
    // is executed only for large substrings (> 8 chars)
    bind(RELOAD_SUBSTR);
    movdqu(vec, Address(str2, 0));
    negptr(cnt2); // Jumped here with negative cnt2, convert to positive

    bind(RELOAD_STR);
    // We came here after the beginning of the substring was
    // matched but the rest of it was not so we need to search
    // again. Start from the next element after the previous match.

    // cnt2 is number of substring remaining elements and
    // cnt1 is number of string remaining elements when cmp failed.
    // Restored cnt1 = cnt1 - cnt2 + int_cnt2
    subl(cnt1, cnt2);
    addl(cnt1, int_cnt2);
    movl(cnt2, int_cnt2); // Now restore cnt2

    decrementl(cnt1);     // Shift to next element
    cmpl(cnt1, cnt2);
    jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring

    addptr(result, 2);

  } // (int_cnt2 > 8)

  // Scan string for start of substr in 16-byte vectors
  bind(SCAN_TO_SUBSTR);
  pcmpestri(vec, Address(result, 0), 0x0d);
  jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
  subl(cnt1, 8);
  jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
  cmpl(cnt1, cnt2);
  jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
  addptr(result, 16);
  jmpb(SCAN_TO_SUBSTR);

  // Found a potential substr
  bind(FOUND_CANDIDATE);
  // Matched whole vector if first element matched (tmp(rcx) == 0).
  if (int_cnt2 == 8) {
    jccb(Assembler::overflow, RET_FOUND);    // OF == 1
  } else { // int_cnt2 > 8
    jccb(Assembler::overflow, FOUND_SUBSTR);
  }
  // After pcmpestri tmp(rcx) contains matched element index
  // Compute start addr of substr
  lea(result, Address(result, tmp, Address::times_2));

  // Make sure string is still long enough
  subl(cnt1, tmp);
  cmpl(cnt1, cnt2);
  if (int_cnt2 == 8) {
    jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
  } else { // int_cnt2 > 8
    jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
  }
  // Left less than substring.

  bind(RET_NOT_FOUND);
  movl(result, -1);
  jmpb(EXIT);

  if (int_cnt2 > 8) {
    // This code is optimized for the case when whole substring
    // is matched if its head is matched.
    bind(MATCH_SUBSTR_HEAD);
    pcmpestri(vec, Address(result, 0), 0x0d);
    // Reload only string if does not match
    jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0

    Label CONT_SCAN_SUBSTR;
    // Compare the rest of substring (> 8 chars).
    bind(FOUND_SUBSTR);
    // First 8 chars are already matched.
    negptr(cnt2);
    addptr(cnt2, 8);

    bind(SCAN_SUBSTR);
    subl(cnt1, 8);
    cmpl(cnt2, -8); // Do not read beyond substring
    jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
    // Back-up strings to avoid reading beyond substring:
    // cnt1 = cnt1 - cnt2 + 8
    addl(cnt1, cnt2); // cnt2 is negative
    addl(cnt1, 8);
    movl(cnt2, 8);  negptr(cnt2);
    bind(CONT_SCAN_SUBSTR);
    if (int_cnt2 < (int)G) {
      movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
      pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
    } else {
      // calculate index in register to avoid integer overflow (int_cnt2*2)
      movl(tmp, int_cnt2);
      addptr(tmp, cnt2);
      movdqu(vec, Address(str2, tmp, Address::times_2, 0));
      pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
    }
    // Need to reload strings pointers if not matched whole vector
    jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
    addptr(cnt2, 8);
    jcc(Assembler::negative, SCAN_SUBSTR);
    // Fall through if found full substring

  } // (int_cnt2 > 8)

  bind(RET_FOUND);
  // Found result if we matched full small substring.
  // Compute substr offset
  subptr(result, str1);
  shrl(result, 1); // index
  bind(EXIT);

} // string_indexofC8
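
// Illustrative note (an assumption, not part of the original source): the
// pcmpestri immediate 0x0d decodes, low bits first, as 01 (unsigned words,
// i.e. Java chars) + 11 ("equal ordered" aggregation, substring search), so
// CF==1 reports a candidate match starting at element rcx of the 16-byte
// window and OF==1 reports a match starting at element 0.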

// Small strings are loaded through stack if they cross page boundary.
void MacroAssembler::string_indexof(Register str1, Register str2,
                                    Register cnt1, Register cnt2,
                                    int int_cnt2,  Register result,
                                    XMMRegister vec, Register tmp) {
  ShortBranchVerifier sbv(this);
  assert(UseSSE42Intrinsics, "SSE4.2 is required");
  //
  // int_cnt2 is length of small (< 8 chars) constant substring
  // or (-1) for non constant substring in which case its length
  // is in cnt2 register.
  //
  // Note, inline_string_indexOf() generates checks:
  // if (substr.count > string.count) return -1;
  // if (substr.count == 0) return 0;
  //
  assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");

  // This method uses the pcmpestri instruction with bound registers
  // inputs:
  //   xmm - substring
  //   rax - substring length (elements count)
  //   mem - scanned string
  //   rdx - string length (elements count)
  //   0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
  // outputs:
  //   rcx - matched index in string
  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");

  Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
        RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
        FOUND_CANDIDATE;

  { //========================================================
    // We don't know where these strings are located
    // and we can't read beyond them. Load them through stack.
    Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;

    movptr(tmp, rsp); // save old SP

    if (int_cnt2 > 0) {     // small (< 8 chars) constant substring
      if (int_cnt2 == 1) {  // One char
        load_unsigned_short(result, Address(str2, 0));
        movdl(vec, result); // move 32 bits
      } else if (int_cnt2 == 2) { // Two chars
        movdl(vec, Address(str2, 0)); // move 32 bits
      } else if (int_cnt2 == 4) { // Four chars
        movq(vec, Address(str2, 0));  // move 64 bits
      } else { // cnt2 = { 3, 5, 6, 7 }
        // Array header size is 12 bytes in 32-bit VM
        // + 6 bytes for 3 chars == 18 bytes,
        // enough space to load vec and shift.
        assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
        movdqu(vec, Address(str2, (int_cnt2*2)-16));
        psrldq(vec, 16-(int_cnt2*2));
      }
    } else { // not constant substring
      cmpl(cnt2, 8);
      jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough

      // We can read beyond string if str+16 does not cross page boundary
      // since heaps are aligned and mapped by pages.
      assert(os::vm_page_size() < (int)G, "default page should be small");
      movl(result, str2); // We need only low 32 bits
      andl(result, (os::vm_page_size()-1));
      cmpl(result, (os::vm_page_size()-16));
      jccb(Assembler::belowEqual, CHECK_STR);

      // Move small strings to stack to allow load 16 bytes into vec.
      subptr(rsp, 16);
      int stk_offset = wordSize-2;
      push(cnt2);

      bind(COPY_SUBSTR);
      load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
      movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
      decrement(cnt2);
      jccb(Assembler::notZero, COPY_SUBSTR);

      pop(cnt2);
      movptr(str2, rsp);  // New substring address
    } // non constant

    bind(CHECK_STR);
    cmpl(cnt1, 8);
    jccb(Assembler::aboveEqual, BIG_STRINGS);

    // Check cross page boundary.
    movl(result, str1); // We need only low 32 bits
    andl(result, (os::vm_page_size()-1));
    cmpl(result, (os::vm_page_size()-16));
    jccb(Assembler::belowEqual, BIG_STRINGS);

    subptr(rsp, 16);
    int stk_offset = -2;
    if (int_cnt2 < 0) { // not constant
      push(cnt2);
      stk_offset += wordSize;
    }
    movl(cnt2, cnt1);

    bind(COPY_STR);
    load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
    movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
    decrement(cnt2);
    jccb(Assembler::notZero, COPY_STR);

    if (int_cnt2 < 0) { // not constant
      pop(cnt2);
    }
    movptr(str1, rsp);  // New string address

    bind(BIG_STRINGS);
    // Load substring.
    if (int_cnt2 < 0) { // -1
      movdqu(vec, Address(str2, 0));
      push(cnt2);       // substr count
      push(str2);       // substr addr
      push(str1);       // string addr
    } else {
      // Small (< 8 chars) constant substrings are loaded already.
      movl(cnt2, int_cnt2);
    }
    push(tmp);  // original SP

  } // Finished loading

  //========================================================
  // Start search
  //

  movptr(result, str1); // string addr

  if (int_cnt2  < 0) {  // Only for non constant substring
    jmpb(SCAN_TO_SUBSTR);

    // SP saved at sp+0
    // String saved at sp+1*wordSize
    // Substr saved at sp+2*wordSize
    // Substr count saved at sp+3*wordSize

    // Reload substr for rescan, this code
    // is executed only for large substrings (> 8 chars)
    bind(RELOAD_SUBSTR);
    movptr(str2, Address(rsp, 2*wordSize));
    movl(cnt2, Address(rsp, 3*wordSize));
    movdqu(vec, Address(str2, 0));
    // We came here after the beginning of the substring was
    // matched but the rest of it was not so we need to search
    // again. Start from the next element after the previous match.
    subptr(str1, result); // Restore counter
    shrl(str1, 1);
    addl(cnt1, str1);
    decrementl(cnt1);     // Shift to next element
    cmpl(cnt1, cnt2);
    jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring

    addptr(result, 2);
  } // non constant

  // Scan string for start of substr in 16-byte vectors
  bind(SCAN_TO_SUBSTR);
  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
  pcmpestri(vec, Address(result, 0), 0x0d);
  jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
  subl(cnt1, 8);
  jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
  cmpl(cnt1, cnt2);
  jccb(Assembler::negative, RET_NOT_FOUND);  // Left less than substring
  addptr(result, 16);

  bind(ADJUST_STR);
  cmpl(cnt1, 8); // Do not read beyond string
  jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
  // Back-up string to avoid reading beyond string.
  lea(result, Address(result, cnt1, Address::times_2, -16));
  movl(cnt1, 8);
  jmpb(SCAN_TO_SUBSTR);

  // Found a potential substr
  bind(FOUND_CANDIDATE);
  // After pcmpestri tmp(rcx) contains matched element index

  // Make sure string is still long enough
  subl(cnt1, tmp);
  cmpl(cnt1, cnt2);
  jccb(Assembler::greaterEqual, FOUND_SUBSTR);
  // Left less than substring.

  bind(RET_NOT_FOUND);
  movl(result, -1);
  jmpb(CLEANUP);

  bind(FOUND_SUBSTR);
  // Compute start addr of substr
  lea(result, Address(result, tmp, Address::times_2));

  if (int_cnt2 > 0) { // Constant substring
    // Repeat search for small substring (< 8 chars)
    // from new point without reloading substring.
    // Have to check that we don't read beyond string.
    cmpl(tmp, 8-int_cnt2);
    jccb(Assembler::greater, ADJUST_STR);
    // Fall through if matched whole substring.
  } else { // non constant
    assert(int_cnt2 == -1, "should be != 0");

    addl(tmp, cnt2);
    // Found result if we matched whole substring.
    cmpl(tmp, 8);
    jccb(Assembler::lessEqual, RET_FOUND);

    // Repeat search for small substring (<= 8 chars)
    // from new point 'str1' without reloading substring.
    cmpl(cnt2, 8);
    // Have to check that we don't read beyond string.
    jccb(Assembler::lessEqual, ADJUST_STR);

    Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
    // Compare the rest of substring (> 8 chars).
    movptr(str1, result);

    cmpl(tmp, cnt2);
    // First 8 chars are already matched.
    jccb(Assembler::equal, CHECK_NEXT);

    bind(SCAN_SUBSTR);
    pcmpestri(vec, Address(str1, 0), 0x0d);
    // Need to reload strings pointers if not matched whole vector
    jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0

    bind(CHECK_NEXT);
    subl(cnt2, 8);
    jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
    addptr(str1, 16);
    addptr(str2, 16);
    subl(cnt1, 8);
    cmpl(cnt2, 8); // Do not read beyond substring
    jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
    // Back-up strings to avoid reading beyond substring.
    lea(str2, Address(str2, cnt2, Address::times_2, -16));
    lea(str1, Address(str1, cnt2, Address::times_2, -16));
    subl(cnt1, cnt2);
    movl(cnt2, 8);
    addl(cnt1, 8);
    bind(CONT_SCAN_SUBSTR);
    movdqu(vec, Address(str2, 0));
    jmpb(SCAN_SUBSTR);

    bind(RET_FOUND_LONG);
    movptr(str1, Address(rsp, wordSize));
  } // non constant

  bind(RET_FOUND);
  // Compute substr offset
  subptr(result, str1);
  shrl(result, 1); // index

  bind(CLEANUP);
  pop(rsp); // restore SP

} // string_indexof
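
// Illustrative note (an assumption, not part of the original source): the
// page-boundary test above computes
//   (addr & (page_size - 1)) > page_size - 16
// i.e. the string starts within the last 15 bytes of a page, where an
// unaligned 16-byte movdqu could touch the next, possibly unmapped, page;
// only in that case is the short string copied to the stack first.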

// Compare strings.
void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2, Register result,
                                    XMMRegister vec1) {
  ShortBranchVerifier sbv(this);
  Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;

  // Compute the minimum of the string lengths and the
  // difference of the string lengths (stack).
  // Do the conditional move stuff
  movl(result, cnt1);
  subl(cnt1, cnt2);
  push(cnt1);
  cmov32(Assembler::lessEqual, cnt2, result);

  // Is the minimum length zero?
  testl(cnt2, cnt2);
  jcc(Assembler::zero, LENGTH_DIFF_LABEL);

  // Compare first characters
  load_unsigned_short(result, Address(str1, 0));
  load_unsigned_short(cnt1, Address(str2, 0));
  subl(result, cnt1);
  jcc(Assembler::notZero, POP_LABEL);
  cmpl(cnt2, 1);
  jcc(Assembler::equal, LENGTH_DIFF_LABEL);

  // Check if the strings start at the same location.
  cmpptr(str1, str2);
  jcc(Assembler::equal, LENGTH_DIFF_LABEL);

  Address::ScaleFactor scale = Address::times_2;
  int stride = 8;

  if (UseAVX >= 2 && UseSSE42Intrinsics) {
    Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR;
    Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR;
    Label COMPARE_TAIL_LONG;
    int pcmpmask = 0x19;

    // Setup to compare 16-chars (32-bytes) vectors,
    // start from first character again because it has aligned address.
    int stride2 = 16;
    int adr_stride  = stride  << scale;
    int adr_stride2 = stride2 << scale;

    assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
    // rax and rdx are used by pcmpestri as elements counters
    movl(result, cnt2);
    andl(cnt2, ~(stride2-1));   // cnt2 holds the vector count
    jcc(Assembler::zero, COMPARE_TAIL_LONG);

    // fast path : compare first 2 8-char vectors.
    bind(COMPARE_16_CHARS);
    movdqu(vec1, Address(str1, 0));
    pcmpestri(vec1, Address(str2, 0), pcmpmask);
    jccb(Assembler::below, COMPARE_INDEX_CHAR);

    movdqu(vec1, Address(str1, adr_stride));
    pcmpestri(vec1, Address(str2, adr_stride), pcmpmask);
    jccb(Assembler::aboveEqual, COMPARE_WIDE_VECTORS);
    addl(cnt1, stride);

    // Compare the characters at index in cnt1
    bind(COMPARE_INDEX_CHAR); // cnt1 has the offset of the mismatching character
    load_unsigned_short(result, Address(str1, cnt1, scale));
    load_unsigned_short(cnt2, Address(str2, cnt1, scale));
    subl(result, cnt2);
    jmp(POP_LABEL);

    // Setup the registers to start vector comparison loop
    bind(COMPARE_WIDE_VECTORS);
    lea(str1, Address(str1, result, scale));
    lea(str2, Address(str2, result, scale));
    subl(result, stride2);
    subl(cnt2, stride2);
    jccb(Assembler::zero, COMPARE_WIDE_TAIL);
    negptr(result);

    // In a loop, compare 16-chars (32-bytes) at once using (vpxor+vptest)
    bind(COMPARE_WIDE_VECTORS_LOOP);
    vmovdqu(vec1, Address(str1, result, scale));
    vpxor(vec1, Address(str2, result, scale));
    vptest(vec1, vec1);
    jccb(Assembler::notZero, VECTOR_NOT_EQUAL);
    addptr(result, stride2);
    subl(cnt2, stride2);
    jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
    // clean upper bits of YMM registers
    vzeroupper();

    // compare wide vectors tail
    bind(COMPARE_WIDE_TAIL);
    testptr(result, result);
    jccb(Assembler::zero, LENGTH_DIFF_LABEL);

    movl(result, stride2);
    movl(cnt2, result);
    negptr(result);
    jmpb(COMPARE_WIDE_VECTORS_LOOP);

    // Identifies the mismatching (higher or lower) 16-bytes in the 32-byte vectors.
    bind(VECTOR_NOT_EQUAL);
    // clean upper bits of YMM registers
    vzeroupper();
    lea(str1, Address(str1, result, scale));
    lea(str2, Address(str2, result, scale));
    jmp(COMPARE_16_CHARS);

    // Compare tail chars, length between 1 to 15 chars
    bind(COMPARE_TAIL_LONG);
    movl(cnt2, result);
    cmpl(cnt2, stride);
    jccb(Assembler::less, COMPARE_SMALL_STR);

    movdqu(vec1, Address(str1, 0));
    pcmpestri(vec1, Address(str2, 0), pcmpmask);
    jcc(Assembler::below, COMPARE_INDEX_CHAR);
    subptr(cnt2, stride);
    jccb(Assembler::zero, LENGTH_DIFF_LABEL);
    lea(str1, Address(str1, result, scale));
    lea(str2, Address(str2, result, scale));
    negptr(cnt2);
    jmpb(WHILE_HEAD_LABEL);

    bind(COMPARE_SMALL_STR);
  } else if (UseSSE42Intrinsics) {
    Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
    int pcmpmask = 0x19;
    // Setup to compare 8-char (16-byte) vectors,
    // start from first character again because it has aligned address.
    movl(result, cnt2);
    andl(cnt2, ~(stride - 1));   // cnt2 holds the vector count
    jccb(Assembler::zero, COMPARE_TAIL);

    lea(str1, Address(str1, result, scale));
    lea(str2, Address(str2, result, scale));
    negptr(result);

    // pcmpestri
    // inputs:
    //   vec1 - substring
    //   rax  - negative string length (elements count)
    //   mem  - scanned string
    //   rdx  - string length (elements count)
    //   pcmpmask - cmp mode: 11000 (string compare with negated result)
    //              + 00 (unsigned bytes) or + 01 (unsigned shorts)
    // outputs:
    //   rcx - first mismatched element index
    assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");

    bind(COMPARE_WIDE_VECTORS);
    movdqu(vec1, Address(str1, result, scale));
    pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
    // After pcmpestri cnt1(rcx) contains mismatched element index

    jccb(Assembler::below, VECTOR_NOT_EQUAL);  // CF==1
    addptr(result, stride);
    subptr(cnt2, stride);
    jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);

    // compare wide vectors tail
    testptr(result, result);
    jccb(Assembler::zero, LENGTH_DIFF_LABEL);

    movl(cnt2, stride);
    movl(result, stride);
    negptr(result);
    movdqu(vec1, Address(str1, result, scale));
    pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
    jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);

    // Mismatched characters in the vectors
    bind(VECTOR_NOT_EQUAL);
    addptr(cnt1, result);
    load_unsigned_short(result, Address(str1, cnt1, scale));
    load_unsigned_short(cnt2, Address(str2, cnt1, scale));
    subl(result, cnt2);
    jmpb(POP_LABEL);

    bind(COMPARE_TAIL); // limit is zero
    movl(cnt2, result);
    // Fallthru to tail compare
  }
  // Shift str2 and str1 to the end of the arrays, negate min
  lea(str1, Address(str1, cnt2, scale));
  lea(str2, Address(str2, cnt2, scale));
  decrementl(cnt2);  // first character was compared already
  negptr(cnt2);

  // Compare the rest of the elements
  bind(WHILE_HEAD_LABEL);
  load_unsigned_short(result, Address(str1, cnt2, scale, 0));
  load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
  subl(result, cnt1);
  jccb(Assembler::notZero, POP_LABEL);
  increment(cnt2);
  jccb(Assembler::notZero, WHILE_HEAD_LABEL);

  // Strings are equal up to min length. Return the length difference.
  bind(LENGTH_DIFF_LABEL);
  pop(result);
  jmpb(DONE_LABEL);

  // Discard the stored length difference
  bind(POP_LABEL);
  pop(cnt1);

  // That's it
  bind(DONE_LABEL);
}
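
// Illustrative note (an assumption, not part of the original source): the
// pcmpestri immediate 0x19 decodes, low bits first, as 01 (unsigned words)
// + 10 ("equal each", element-wise compare) + 01 (negated result), so CF==1
// signals that some characters differ and rcx indexes the first mismatch.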

// Compare char[] arrays aligned to 4 bytes or substrings.
void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                                        Register limit, Register result, Register chr,
                                        XMMRegister vec1, XMMRegister vec2) {
  ShortBranchVerifier sbv(this);
  Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;

  int length_offset  = arrayOopDesc::length_offset_in_bytes();
  int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);

  // Check the input args
  cmpptr(ary1, ary2);
  jcc(Assembler::equal, TRUE_LABEL);

  if (is_array_equ) {
    // Need additional checks for arrays_equals.
    testptr(ary1, ary1);
    jcc(Assembler::zero, FALSE_LABEL);
    testptr(ary2, ary2);
    jcc(Assembler::zero, FALSE_LABEL);

    // Check the lengths
    movl(limit, Address(ary1, length_offset));
    cmpl(limit, Address(ary2, length_offset));
    jcc(Assembler::notEqual, FALSE_LABEL);
  }

  // count == 0
  testl(limit, limit);
  jcc(Assembler::zero, TRUE_LABEL);

  if (is_array_equ) {
    // Load array address
    lea(ary1, Address(ary1, base_offset));
    lea(ary2, Address(ary2, base_offset));
  }

  shll(limit, 1);      // byte count != 0
  movl(result, limit); // copy

  if (UseAVX >= 2) {
    // With AVX2, use 32-byte vector compare
    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

    // Compare 32-byte vectors
    andl(result, 0x0000001e);  //   tail count (in bytes)
    andl(limit, 0xffffffe0);   // vector count (in bytes)
    jccb(Assembler::zero, COMPARE_TAIL);

    lea(ary1, Address(ary1, limit, Address::times_1));
    lea(ary2, Address(ary2, limit, Address::times_1));
    negptr(limit);

    bind(COMPARE_WIDE_VECTORS);
    vmovdqu(vec1, Address(ary1, limit, Address::times_1));
    vmovdqu(vec2, Address(ary2, limit, Address::times_1));
    vpxor(vec1, vec2);

    vptest(vec1, vec1);
    jccb(Assembler::notZero, FALSE_LABEL);
    addptr(limit, 32);
    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);

    testl(result, result);
    jccb(Assembler::zero, TRUE_LABEL);

    vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
    vmovdqu(vec2, Address(ary2, result, Address::times_1, -32));
    vpxor(vec1, vec2);

    vptest(vec1, vec1);
    jccb(Assembler::notZero, FALSE_LABEL);
    jmpb(TRUE_LABEL);

    bind(COMPARE_TAIL); // limit is zero
    movl(limit, result);
    // Fallthru to tail compare
  } else if (UseSSE42Intrinsics) {
    // With SSE4.2, use double quad vector compare
    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

    // Compare 16-byte vectors
    andl(result, 0x0000000e);  //   tail count (in bytes)
    andl(limit, 0xfffffff0);   // vector count (in bytes)
    jccb(Assembler::zero, COMPARE_TAIL);

    lea(ary1, Address(ary1, limit, Address::times_1));
    lea(ary2, Address(ary2, limit, Address::times_1));
    negptr(limit);

    bind(COMPARE_WIDE_VECTORS);
    movdqu(vec1, Address(ary1, limit, Address::times_1));
    movdqu(vec2, Address(ary2, limit, Address::times_1));
    pxor(vec1, vec2);

    ptest(vec1, vec1);
    jccb(Assembler::notZero, FALSE_LABEL);
    addptr(limit, 16);
    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);

    testl(result, result);
    jccb(Assembler::zero, TRUE_LABEL);

    movdqu(vec1, Address(ary1, result, Address::times_1, -16));
    movdqu(vec2, Address(ary2, result, Address::times_1, -16));
    pxor(vec1, vec2);

    ptest(vec1, vec1);
    jccb(Assembler::notZero, FALSE_LABEL);
    jmpb(TRUE_LABEL);

    bind(COMPARE_TAIL); // limit is zero
    movl(limit, result);
    // Fallthru to tail compare
  }

  // Compare 4-byte vectors
  andl(limit, 0xfffffffc); // vector count (in bytes)
  jccb(Assembler::zero, COMPARE_CHAR);

  lea(ary1, Address(ary1, limit, Address::times_1));
  lea(ary2, Address(ary2, limit, Address::times_1));
  negptr(limit);

  bind(COMPARE_VECTORS);
  movl(chr, Address(ary1, limit, Address::times_1));
  cmpl(chr, Address(ary2, limit, Address::times_1));
  jccb(Assembler::notEqual, FALSE_LABEL);
  addptr(limit, 4);
  jcc(Assembler::notZero, COMPARE_VECTORS);

  // Compare trailing char (final 2 bytes), if any
  bind(COMPARE_CHAR);
  testl(result, 0x2);   // tail  char
  jccb(Assembler::zero, TRUE_LABEL);
  load_unsigned_short(chr, Address(ary1, 0));
  load_unsigned_short(limit, Address(ary2, 0));
  cmpl(chr, limit);
  jccb(Assembler::notEqual, FALSE_LABEL);

  bind(TRUE_LABEL);
  movl(result, 1);   // return true
  jmpb(DONE);

  bind(FALSE_LABEL);
  xorl(result, result); // return false

  // That's it
  bind(DONE);
  if (UseAVX >= 2) {
    // clean upper bits of YMM registers
    vzeroupper();
  }
}
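
// Illustrative note (an assumption, not part of the original source): the
// vpxor+vptest pair above is a branch-on-any-difference test: XORing two
// equal 32-byte blocks yields all zeroes, and vptest sets ZF only when its
// operand is all zero, so "notZero" means the blocks differ. The tail is
// handled with an overlapping load ending at the last byte, re-checking a
// few already-compared bytes instead of running a scalar loop.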

void MacroAssembler::generate_fill(BasicType t, bool aligned,
                                   Register to, Register value, Register count,
                                   Register rtmp, XMMRegister xtmp) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(to, value, count, rtmp);
  Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
  Label L_fill_2_bytes, L_fill_4_bytes;

  int shift = -1;
  switch (t) {
    case T_BYTE:
      shift = 2;
      break;
    case T_SHORT:
      shift = 1;
      break;
    case T_INT:
      shift = 0;
      break;
    default: ShouldNotReachHere();
  }

  if (t == T_BYTE) {
    andl(value, 0xff);
    movl(rtmp, value);
    shll(rtmp, 8);
    orl(value, rtmp);
  }
  if (t == T_SHORT) {
    andl(value, 0xffff);
  }
  if (t == T_BYTE || t == T_SHORT) {
    movl(rtmp, value);
    shll(rtmp, 16);
    orl(value, rtmp);
  }
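
  // Illustrative note (an assumption, not part of the original source): the
  // widening above replicates the fill value across 32 bits, e.g. for
  // T_BYTE value 0xAB:
  //   0x000000AB | (0x000000AB << 8)  -> 0x0000ABAB
  //   0x0000ABAB | (0x0000ABAB << 16) -> 0xABABABAB
  // so every 4-byte store below writes the correct repeating pattern.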
  cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
  jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
  if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
    // align source address at 4 bytes address boundary
    if (t == T_BYTE) {
      // One byte misalignment happens only for byte arrays
      testptr(to, 1);
      jccb(Assembler::zero, L_skip_align1);
      movb(Address(to, 0), value);
      increment(to);
      decrement(count);
      BIND(L_skip_align1);
    }
    // Two bytes misalignment happens only for byte and short (char) arrays
    testptr(to, 2);
    jccb(Assembler::zero, L_skip_align2);
    movw(Address(to, 0), value);
    addptr(to, 2);
    subl(count, 1<<(shift-1));
    BIND(L_skip_align2);
  }
  if (UseSSE < 2) {
    Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
    // Fill 32-byte chunks
    subl(count, 8 << shift);
    jcc(Assembler::less, L_check_fill_8_bytes);
    align(16);

    BIND(L_fill_32_bytes_loop);

    for (int i = 0; i < 32; i += 4) {
      movl(Address(to, i), value);
    }

    addptr(to, 32);
    subl(count, 8 << shift);
    jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
    BIND(L_check_fill_8_bytes);
    addl(count, 8 << shift);
    jccb(Assembler::zero, L_exit);
    jmpb(L_fill_8_bytes);

    //
    // length is too short, just fill qwords
    //
    BIND(L_fill_8_bytes_loop);
    movl(Address(to, 0), value);
    movl(Address(to, 4), value);
    addptr(to, 8);
    BIND(L_fill_8_bytes);
    subl(count, 1 << (shift + 1));
    jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    // fall through to fill 4 bytes
  } else {
    Label L_fill_32_bytes;
    if (!UseUnalignedLoadStores) {
      // align to 8 bytes, we know we are 4 byte aligned to start
      testptr(to, 4);
      jccb(Assembler::zero, L_fill_32_bytes);
      movl(Address(to, 0), value);
      addptr(to, 4);
      subl(count, 1<<shift);
    }
    BIND(L_fill_32_bytes);
    {
      assert( UseSSE >= 2, "supported cpu only" );
      Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
      movdl(xtmp, value);
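      // movdl places the 32-bit fill pattern in the low dword of xtmp; the
      // vpbroadcastd/pshufd below replicate it across the whole XMM/YMM
      // register so one wide store fills 16 or 32 bytes at a time.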
      if (UseAVX >= 2 && UseUnalignedLoadStores) {
        // Fill 64-byte chunks
        Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
        vpbroadcastd(xtmp, xtmp);

        subl(count, 16 << shift);
        jcc(Assembler::less, L_check_fill_32_bytes);
        align(16);

        BIND(L_fill_64_bytes_loop);
        vmovdqu(Address(to, 0), xtmp);
        vmovdqu(Address(to, 32), xtmp);
        addptr(to, 64);
        subl(count, 16 << shift);
        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);

        BIND(L_check_fill_32_bytes);
        addl(count, 8 << shift);
        jccb(Assembler::less, L_check_fill_8_bytes);
        vmovdqu(Address(to, 0), xtmp);
        addptr(to, 32);
        subl(count, 8 << shift);

        BIND(L_check_fill_8_bytes);
        // clean upper bits of YMM registers
        vzeroupper();
      } else {
        // Fill 32-byte chunks
        pshufd(xtmp, xtmp, 0);

        subl(count, 8 << shift);
        jcc(Assembler::less, L_check_fill_8_bytes);
        align(16);

        BIND(L_fill_32_bytes_loop);

        if (UseUnalignedLoadStores) {
          movdqu(Address(to, 0), xtmp);
          movdqu(Address(to, 16), xtmp);
        } else {
          movq(Address(to, 0), xtmp);
          movq(Address(to, 8), xtmp);
          movq(Address(to, 16), xtmp);
          movq(Address(to, 24), xtmp);
        }

        addptr(to, 32);
        subl(count, 8 << shift);
        jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);

        BIND(L_check_fill_8_bytes);
      }
      addl(count, 8 << shift);
      jccb(Assembler::zero, L_exit);
      jmpb(L_fill_8_bytes);

      //
      // length is too short, just fill qwords
      //
      BIND(L_fill_8_bytes_loop);
      movq(Address(to, 0), xtmp);
      addptr(to, 8);
      BIND(L_fill_8_bytes);
      subl(count, 1 << (shift + 1));
      jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    }
  }
  // fill trailing 4 bytes
  BIND(L_fill_4_bytes);
  testl(count, 1<<shift);
  jccb(Assembler::zero, L_fill_2_bytes);
  movl(Address(to, 0), value);
  if (t == T_BYTE || t == T_SHORT) {
    addptr(to, 4);
    BIND(L_fill_2_bytes);
    // fill trailing 2 bytes
    testl(count, 1<<(shift-1));
    jccb(Assembler::zero, L_fill_byte);
    movw(Address(to, 0), value);
    if (t == T_BYTE) {
      addptr(to, 2);
      BIND(L_fill_byte);
      // fill trailing byte
      testl(count, 1);
      jccb(Assembler::zero, L_exit);
      movb(Address(to, 0), value);
    } else {
      BIND(L_fill_byte);
    }
  } else {
    BIND(L_fill_2_bytes);
  }
  BIND(L_exit);
}

// encode char[] to byte[] in ISO_8859_1
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
                                      XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                      XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                      Register tmp5, Register result) {
  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result
  ShortBranchVerifier sbv(this);
  assert_different_registers(src, dst, len, tmp5, result);
  Label L_done, L_copy_1_char, L_copy_1_char_exit;

  // set result
  xorl(result, result);
  // check for zero length
  testl(len, len);
  jcc(Assembler::zero, L_done);
  movl(result, len);

  // Setup pointers
  lea(src, Address(src, len, Address::times_2)); // char[]
  lea(dst, Address(dst, len, Address::times_1)); // byte[]
  negptr(len);

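  // Both pointers now address one element past the end and 'len' is the
  // negative element count, so Address(src, len, times_2) walks forward
  // through the array as 'len' counts up toward zero; on an early exit at a
  // char above 0xFF, adding the (negative) remainder back into 'result'
  // yields the number of chars actually encoded.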
  if (UseSSE42Intrinsics || UseAVX >= 2) {
    Label L_chars_8_check, L_copy_8_chars, L_copy_8_chars_exit;
    Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;

    if (UseAVX >= 2) {
      Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
      movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
      movdl(tmp1Reg, tmp5);
      vpbroadcastd(tmp1Reg, tmp1Reg);
      jmpb(L_chars_32_check);

      bind(L_copy_32_chars);
      vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
      vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
      vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector256 */ true);
      vptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
      jccb(Assembler::notZero, L_copy_32_chars_exit);
      vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector256 */ true);
      vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector256 */ true);
      vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);

      bind(L_chars_32_check);
      addptr(len, 32);
      jccb(Assembler::lessEqual, L_copy_32_chars);

      bind(L_copy_32_chars_exit);
      subptr(len, 16);
      jccb(Assembler::greater, L_copy_16_chars_exit);

    } else if (UseSSE42Intrinsics) {
      movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
      jmpb(L_chars_16_check);
    }

    bind(L_copy_16_chars);
    if (UseAVX >= 2) {
      vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
      vptest(tmp2Reg, tmp1Reg);
      jccb(Assembler::notZero, L_copy_16_chars_exit);
      vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector256 */ true);
      vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector256 */ true);
    } else {
      if (UseAVX > 0) {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector256 */ false);
      } else {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        por(tmp2Reg, tmp3Reg);
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        por(tmp2Reg, tmp4Reg);
      }
      ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
      jccb(Assembler::notZero, L_copy_16_chars_exit);
      packuswb(tmp3Reg, tmp4Reg);
    }
    movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);

    bind(L_chars_16_check);
    addptr(len, 16);
    jccb(Assembler::lessEqual, L_copy_16_chars);

    bind(L_copy_16_chars_exit);
    if (UseAVX >= 2) {
      // clean upper bits of YMM registers
      vzeroupper();
    }
    subptr(len, 8);
    jccb(Assembler::greater, L_copy_8_chars_exit);

    bind(L_copy_8_chars);
    movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
    ptest(tmp3Reg, tmp1Reg);
    jccb(Assembler::notZero, L_copy_8_chars_exit);
    packuswb(tmp3Reg, tmp1Reg);
    movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
    addptr(len, 8);
    jccb(Assembler::lessEqual, L_copy_8_chars);

    bind(L_copy_8_chars_exit);
    subptr(len, 8);
    jccb(Assembler::zero, L_done);
  }

  bind(L_copy_1_char);
  load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
  testl(tmp5, 0xff00);      // check if Unicode char
  jccb(Assembler::notZero, L_copy_1_char_exit);
  movb(Address(dst, len, Address::times_1, 0), tmp5);
  addptr(len, 1);
  jccb(Assembler::less, L_copy_1_char);

  bind(L_copy_1_char_exit);
  addptr(result, len); // len is negative count of not processed elements
  bind(L_done);
}

/**
 * Emits code to update CRC-32 with a byte value according to constants in table
 *
 * @param [in,out]crc   Register containing the crc.
 * @param [in]val       Register containing the byte to fold into the CRC.
 * @param [in]table     Register containing the table of crc constants.
 *
 * uint32_t crc;
 * val = crc_table[(val ^ crc) & 0xFF];
 * crc = val ^ (crc >> 8);
 *
 */
void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
  xorl(val, crc);
  andl(val, 0xFF);
  shrl(crc, 8); // unsigned shift
  xorl(crc, Address(table, val, Address::times_4, 0));
}
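// (This is one step of the classic byte-at-a-time, table-driven CRC: the
// table maps each input byte to its precomputed 32-bit contribution.)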

/**
 * Fold 128-bit data chunk
 */
void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
  vpclmulhdq(xtmp, xK, xcrc); // [123:64]
  vpclmulldq(xcrc, xK, xcrc); // [63:0]
  vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
  pxor(xcrc, xtmp);
}

void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
  vpclmulhdq(xtmp, xK, xcrc);
  vpclmulldq(xcrc, xK, xcrc);
  pxor(xcrc, xbuf);
  pxor(xcrc, xtmp);
}
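
// Both overloads perform the carry-less-multiply folding step from Intel's
// CRC-by-PCLMULQDQ technique (Gopal et al., "Fast CRC Computation for
// Generic Polynomials Using PCLMULQDQ Instruction", 2009): xK holds
// precomputed constants of the form x^T mod P(x), so multiplying the two
// 64-bit halves of the running remainder by them and XORing in the next
// data chunk advances the CRC by a full 128-bit block without a division.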

/**
 * 8-bit folds to compute 32-bit CRC
 *
 * uint64_t xcrc;
 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
 */
void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
  movdl(tmp, xcrc);
  andl(tmp, 0xFF);
  movdl(xtmp, Address(table, tmp, Address::times_4, 0));
  psrldq(xcrc, 1); // unsigned shift one byte
  pxor(xcrc, xtmp);
}

/**
 * uint32_t crc;
 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
 */
void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
  movl(tmp, crc);
  andl(tmp, 0xFF);
  shrl(crc, 8);
  xorl(crc, Address(table, tmp, Address::times_4, 0));
}
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6486 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6487 |
/** |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6488 |
* @param crc register containing existing CRC (32-bit) |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6489 |
* @param buf register pointing to input byte buffer (byte*) |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6490 |
* @param len register containing number of bytes |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6491 |
* @param table register that will contain address of CRC table |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6492 |
* @param tmp scratch register |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6493 |
*/ |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6494 |
void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6495 |
assert_different_registers(crc, buf, len, table, tmp, rax); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6496 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6497 |
Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6498 |
Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6499 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6500 |
lea(table, ExternalAddress(StubRoutines::crc_table_addr())); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6501 |
notl(crc); // ~crc |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6502 |
cmpl(len, 16); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6503 |
jcc(Assembler::less, L_tail); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6504 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6505 |
// Align buffer to 16 bytes |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6506 |
movl(tmp, buf); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6507 |
andl(tmp, 0xF); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6508 |
jccb(Assembler::zero, L_aligned); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6509 |
subl(tmp, 16); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6510 |
addl(len, tmp); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6511 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6512 |
align(4); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6513 |
BIND(L_align_loop); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6514 |
movsbl(rax, Address(buf, 0)); // load byte with sign extension |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6515 |
update_byte_crc32(crc, rax, table); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6516 |
increment(buf); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6517 |
incrementl(tmp); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6518 |
jccb(Assembler::less, L_align_loop); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6519 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6520 |
BIND(L_aligned); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6521 |
movl(tmp, len); // save |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6522 |
shrl(len, 4); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6523 |
jcc(Assembler::zero, L_tail_restore); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6524 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6525 |
// Fold crc into first bytes of vector |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6526 |
movdqa(xmm1, Address(buf, 0)); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6527 |
movdl(rax, xmm1); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6528 |
xorl(crc, rax); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6529 |
pinsrd(xmm1, crc, 0); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6530 |
addptr(buf, 16); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6531 |
subl(len, 4); // len > 0 |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6532 |
jcc(Assembler::less, L_fold_tail); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6533 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6534 |
movdqa(xmm2, Address(buf, 0)); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6535 |
movdqa(xmm3, Address(buf, 16)); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6536 |
movdqa(xmm4, Address(buf, 32)); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6537 |
addptr(buf, 48); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6538 |
subl(len, 3); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6539 |
jcc(Assembler::lessEqual, L_fold_512b); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6540 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6541 |
// Fold total 512 bits of polynomial on each iteration, |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6542 |
// 128 bits per each of 4 parallel streams. |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6543 |
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32)); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6544 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6545 |
align(32); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6546 |
BIND(L_fold_512b_loop); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6547 |
fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6548 |
fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6549 |
fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6550 |
fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6551 |
addptr(buf, 64); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6552 |
subl(len, 4); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6553 |
jcc(Assembler::greater, L_fold_512b_loop); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6554 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6555 |
// Fold 512 bits to 128 bits. |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6556 |
BIND(L_fold_512b); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6557 |
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16)); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6558 |
fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6559 |
fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6560 |
fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6561 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6562 |
// Fold the rest of 128 bits data chunks |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6563 |
BIND(L_fold_tail); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6564 |
addl(len, 3); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6565 |
jccb(Assembler::lessEqual, L_fold_128b); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6566 |
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16)); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6567 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6568 |
BIND(L_fold_tail_loop); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6569 |
fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6570 |
addptr(buf, 16); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6571 |
decrementl(len); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6572 |
jccb(Assembler::greater, L_fold_tail_loop); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6573 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6574 |
// Fold 128 bits in xmm1 down into 32 bits in crc register. |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6575 |
BIND(L_fold_128b); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6576 |
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr())); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6577 |
vpclmulqdq(xmm2, xmm0, xmm1, 0x1); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6578 |
vpand(xmm3, xmm0, xmm2, false /* vector256 */); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6579 |
vpclmulqdq(xmm0, xmm0, xmm3, 0x1); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6580 |
psrldq(xmm1, 8); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6581 |
psrldq(xmm2, 4); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6582 |
pxor(xmm0, xmm1); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6583 |
pxor(xmm0, xmm2); |
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6584 |
|
61bfc8995bb3
7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
drchase
parents:
16624
diff
changeset
|
6585 |
  // 8 8-bit folds to compute 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(xmm0, table, xmm1, rax);
  }
  movdl(crc, xmm0); // mov 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, rax);
  }

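  // Note on the 8-bit folds above: each fold_8bit_crc32 retires the low byte
  // of the running value through the CRC lookup table. In scalar C terms a
  // single fold is roughly (illustrative sketch only):
  //
  //   crc = (crc >> 8) ^ table[crc & 0xff];
  //
  // Four folds on xmm0 plus four on the general register make up the eight
  // folds the comment above refers to.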
  BIND(L_tail_restore);
  movl(len, tmp); // restore
  BIND(L_tail);
  andl(len, 0xf);
  jccb(Assembler::zero, L_exit);

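  // The andl with 0xf above leaves the 0..15 bytes that did not fill a
  // 16-byte chunk; the loop below retires them one byte at a time with the
  // table-driven update.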
  // Fold the remaining bytes
  align(4);
  BIND(L_tail_loop);
  movsbl(rax, Address(buf, 0)); // load byte with sign extension
  update_byte_crc32(crc, rax, table);
  increment(buf);
  decrementl(len);
  jccb(Assembler::greater, L_tail_loop);

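  // Note on the byte loop above: update_byte_crc32 is the classic one-byte
  // table step; in scalar C terms each iteration is roughly (illustrative
  // sketch only):
  //
  //   crc = (crc >> 8) ^ table[(crc ^ *buf++) & 0xff];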
  BIND(L_exit);
  notl(crc); // ~c
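  // The final complement mirrors the inversion conventionally applied when
  // the CRC register is initialized: CRC-32 (as used by java.util.zip) is
  // defined over the bitwise complement of the running value.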
}

#undef BIND
#undef BLOCK_COMMENT


Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::zero:         return Assembler::notZero;
    case Assembler::notZero:      return Assembler::zero;
    case Assembler::less:         return Assembler::greaterEqual;
    case Assembler::lessEqual:    return Assembler::greater;
    case Assembler::greater:      return Assembler::lessEqual;
    case Assembler::greaterEqual: return Assembler::less;
    case Assembler::below:        return Assembler::aboveEqual;
    case Assembler::belowEqual:   return Assembler::above;
    case Assembler::above:        return Assembler::belowEqual;
    case Assembler::aboveEqual:   return Assembler::below;
    case Assembler::overflow:     return Assembler::noOverflow;
    case Assembler::noOverflow:   return Assembler::overflow;
    case Assembler::negative:     return Assembler::positive;
    case Assembler::positive:     return Assembler::negative;
    case Assembler::parity:       return Assembler::noParity;
    case Assembler::noParity:     return Assembler::parity;
  }
  ShouldNotReachHere(); return Assembler::overflow;
}

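// Usage sketch (illustrative): negate_condition lets a caller branch around
// a block when a condition fails, e.g.
//
//   Label L_skip;
//   jcc(negate_condition(cond), L_skip);
//   // ... code executed only when cond holds ...
//   bind(L_skip);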
SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, const bool* flag_addr, bool value) {
  _masm = masm;
  _masm->cmp8(ExternalAddress((address)flag_addr), value);
  _masm->jcc(Assembler::equal, _label);
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}
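// Usage sketch (illustrative): SkipIfEqual is an RAII guard; the constructor
// emits the compare-and-branch and the destructor binds the target label, so
// code generated inside the guard's scope is skipped at run time whenever
// *flag_addr == value. With a hypothetical flag:
//
//   {
//     SkipIfEqual skip(masm, &UseFooFlag, false);  // UseFooFlag: hypothetical
//     // ... code emitted here executes only when UseFooFlag is true ...
//   } // destructor binds the label; execution continues here either way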