/*
 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#define __ _masm->

// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No amd64 specific initialization
}

// Address computation: local variables
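//
// On x86-64 the locals pointer r14 addresses local 0, and higher-numbered
// locals live at lower addresses, so a local's offset from r14 is zero or
// negative. The register forms below rely on locals_index() /
// locals_index_wide() having negated the bytecode's local index, so that
// Address(r14, reg, Address::times_8) scales it downward. Longs and
// doubles occupy two slots and are addressed through the higher-numbered
// slot, hence laddress(n) == iaddress(n + 1).
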
static inline Address iaddress(int n) {
  return Address(r14, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(r14, r, Address::times_8);
}

static inline Address laddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from rsp.
// It isn't for category 1 values.
static inline Address at_tos () {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
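
// For example, the branch templates emit jcc(j_not(cc), not_taken): the
// inverted condition jumps past the taken-branch code exactly when the
// original bytecode condition does not hold.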


// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg, this means store a NULL.

static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
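    // G1 (below) needs a SATB pre-barrier that logs the old value at the
    // store address, keeping concurrent marking's snapshot consistent, plus
    // a post-barrier that enqueues the card when the store may create a
    // cross-region reference.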
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
        __ g1_write_barrier_pre(rdx /* obj */,
                                rbx /* pre_val */,
                                r15_thread /* thread */,
                                r8 /* tmp */,
                                val != noreg /* tosca_live */,
                                false /* expand_call */);
        if (val == noreg) {
          __ store_heap_oop_null(Address(rdx, 0));
        } else {
          // G1 barrier needs uncompressed oop for region cross check.
          Register new_val = val;
          if (UseCompressedOops) {
            new_val = rbx;
            __ movptr(new_val, val);
          }
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx /* store_adr */,
                                   new_val /* new_val */,
                                   r15_thread /* thread */,
                                   r8 /* tmp */,
                                   rbx /* tmp2 */);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop_null(obj);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
      if (val == noreg) {
        __ store_heap_oop_null(obj);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();

  }
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

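// Bytecode quickening in rough outline: the first execution of, e.g.,
// getfield runs the slow template, resolves the field in the constant pool
// cache, and then patch_bytecode() overwrites the opcode in the method's
// bytecode stream with the matching _fast_igetfield/_fast_agetfield/...
// variant, so later executions skip resolution entirely. bc_reg names the
// replacement bytecode; temp_reg is scratch.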
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done); // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, r13, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

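// The two operand bytes sit big-endian in the bytecode stream, so a plain
// 16-bit load reads them byte-swapped. bswapl re-reverses them into the
// high half of eax, and the arithmetic shift brings the value back down
// sign-extended.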
void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    // String and Object are rewritten to fast_aldc
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // itos JVM_CONSTANT_Integer only
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ bind(Done);
}

// Fast path for caching oop constants.
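// ldc of a String/MethodType/MethodHandle constant is rewritten to
// _fast_aldc, which indexes the resolved-references array directly instead
// of re-dispatching on the constant pool tag every time.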
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testl(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(tmp, (int)bytecode());
  __ call_VM(result, entry, tmp);

  __ bind(resolved);

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

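// Null-check the array, sign-extend the index, and compare it against the
// array length. The comparison is unsigned, so a negative index is seen as
// a huge value and also takes the aboveEqual exit. As a side effect the
// index is copied into rbx (laload below relies on that copy).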
void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}

void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax); // kills rbx
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}

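// aastore needs a dynamic type check: the stored value's klass must be a
// subtype of the array's element klass. Storing a String into an Object[]
// passes, storing an Integer into a String[] must throw
// ArrayStoreException, and a NULL store skips the check entirely.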
void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1());   // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx); // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movptr(rax, Address(rax,
                         ObjArrayKlass::element_klass_offset()));
  // Compress array + index*oopSize + 12 into a single register. Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check. Blows rcx, rdi
  // Superklass in rax. Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

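// For the shift cases below, the count must end up in CL: the x86 variable
// shifts (shll/sarl/shrl without an immediate) implicitly use rcx, hence
// the movl(rcx, rax) before popping the value to be shifted.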
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  : __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  : __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  : __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  : __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  : __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax); // get shift count
  __ pop_l(rax);     // get shift value
  __ shrq(rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}

// Note: 'double' and 'long long' have 32-bits alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  // of 128-bits operands for SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bits operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
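
// Each pool below is only guaranteed 8-byte alignment, so passing &pool[1]
// to double_quadword() rounds down to the 16-byte-aligned quadword pair
// inside the pool; xorps/xorpd require 16-byte-aligned memory operands.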

// Buffer for 128-bits masks used by SSE instructions.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  // the index and the constant -> fix this
}

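// For f2i/f2l/d2i/d2l the cvtt* instructions return the "integer
// indefinite" value (0x80000000 or 0x8000000000000000) on NaN or
// out-of-range input. The templates compare against that sentinel and only
// then call into the runtime, which applies the Java-mandated saturation
// (NaN -> 0, overflow -> MIN_VALUE/MAX_VALUE).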
void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax);
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}

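// lcmp leaves -1, 0, or 1 in rax: after the signed compare, "less" takes
// the early exit with -1 already loaded; otherwise setb(notEqual) produces
// 0 for equal and 1 for greater.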
void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}

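// unordered_result selects fcmpl (-1) vs. fcmpg (+1) behavior: ucomiss and
// ucomisd set the parity flag when an operand is NaN, and the parity
// branches below route that case to the bytecode's prescribed unordered
// answer.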
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}

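// branch() is shared by goto/goto_w, the if* bytecodes, and jsr/jsr_w. It
// decodes the signed displacement from the bytecode stream, bumps the
// backedge counters on backward branches (possibly triggering profiling or
// OSR compilation), and dispatches to the target bytecode.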
|
1563 void TemplateTable::branch(bool is_jsr, bool is_wide) { |
|
1564 __ get_method(rcx); // rcx holds method |
|
1565 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx |
|
1566 // holds bumped taken count |
|
1567 |
|
1568 const ByteSize be_offset = MethodCounters::backedge_counter_offset() + |
|
1569 InvocationCounter::counter_offset(); |
|
1570 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + |
|
1571 InvocationCounter::counter_offset(); |
|
1572 |
|
1573 // Load up edx with the branch displacement |
|
1574 if (is_wide) { |
|
1575 __ movl(rdx, at_bcp(1)); |
|
1576 } else { |
|
1577 __ load_signed_short(rdx, at_bcp(1)); |
|
1578 } |
|
1579 __ bswapl(rdx); |
|
1580 |
|
1581 if (!is_wide) { |
|
1582 __ sarl(rdx, 16); |
|
1583 } |
|
1584 __ movl2ptr(rdx, rdx); |
|
1585 |
|
1586 // Handle all the JSR stuff here, then exit. |
|
1587 // It's much shorter and cleaner than intermingling with the non-JSR |
|
1588 // normal-branch stuff occurring below. |
|
1589 if (is_jsr) { |
|
1590 // Pre-load the next target bytecode into rbx |
|
1591 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0)); |
|
1592 |
|
1593 // compute return address as bci in rax |
|
1594 __ lea(rax, at_bcp((is_wide ? 5 : 3) - |
|
1595 in_bytes(ConstMethod::codes_offset()))); |
|
1596 __ subptr(rax, Address(rcx, Method::const_offset())); |
|
1597 // Adjust the bcp in r13 by the displacement in rdx |
|
1598 __ addptr(r13, rdx); |
|
1599 // jsr returns atos that is not an oop |
|
1600 __ push_i(rax); |
|
1601 __ dispatch_only(vtos); |
|
1602 return; |
|
1603 } |
|
1604 |
|
1605 // Normal (non-jsr) branch handling |
|
1606 |
|
1607 // Adjust the bcp in r13 by the displacement in rdx |
|
1608 __ addptr(r13, rdx); |
|
1609 |
|
1610 assert(UseLoopCounter || !UseOnStackReplacement, |
|
1611 "on-stack-replacement requires loop counters"); |
|
1612 Label backedge_counter_overflow; |
|
1613 Label profile_method; |
|
1614 Label dispatch; |
|
1615 if (UseLoopCounter) { |
|
1616 // increment backedge counter for backward branches |
|
1617 // rax: MDO |
|
1618 // ebx: MDO bumped taken-count |
|
1619 // rcx: method |
|
1620 // rdx: target offset |
|
1621 // r13: target bcp |
|
1622 // r14: locals pointer |
|
1623 __ testl(rdx, rdx); // check if forward or backward branch |
|
1624 __ jcc(Assembler::positive, dispatch); // count only if backward branch |
|
1625 |
|
1626 // check if MethodCounters exists |
|
1627 Label has_counters; |
|
1628 __ movptr(rax, Address(rcx, Method::method_counters_offset())); |
|
1629 __ testptr(rax, rax); |
|
1630 __ jcc(Assembler::notZero, has_counters); |
|
1631 __ push(rdx); |
|
1632 __ push(rcx); |
|
1633 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), |
|
1634 rcx); |
|
1635 __ pop(rcx); |
|
1636 __ pop(rdx); |
|
1637 __ movptr(rax, Address(rcx, Method::method_counters_offset())); |
|
1638 __ jcc(Assembler::zero, dispatch); |
|
1639 __ bind(has_counters); |
|
1640 |
|
1641 if (TieredCompilation) { |
|
1642 Label no_mdo; |
|
1643 int increment = InvocationCounter::count_increment; |
|
1644 if (ProfileInterpreter) { |
|
1645 // Are we profiling? |
|
1646 __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset()))); |
|
1647 __ testptr(rbx, rbx); |
|
1648 __ jccb(Assembler::zero, no_mdo); |
|
1649 // Increment the MDO backedge counter |
|
1650 const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) + |
|
1651 in_bytes(InvocationCounter::counter_offset())); |
|
1652 const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset())); |
|
1653 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, |
|
1654 rax, false, Assembler::zero, &backedge_counter_overflow); |
|
1655 __ jmp(dispatch); |
|
1656 } |
|
1657 __ bind(no_mdo); |
|
1658 // Increment backedge counter in MethodCounters* |
|
1659 __ movptr(rcx, Address(rcx, Method::method_counters_offset())); |
|
1660 const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset())); |
|
1661 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, |
|
1662 rax, false, Assembler::zero, &backedge_counter_overflow); |
|
    } else { // not TieredCompilation
      // increment counter
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);        // store counter

      __ movl(rax, Address(rcx, inv_offset));       // load invocation counter

      __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
      __ addl(rax, Address(rcx, be_offset));        // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against ebx, which is the MDO taken count
          __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the MethodData*, whose value does not get reset on
          // the call to frequency_counter_overflow(). To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andl(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against eax, which is the sum of the
          // counters
          __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
      __ set_method_data_pointer_for_bcp();
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // backedge counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);
      __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);                // test result
      __ jcc(Assembler::zero, dispatch);   // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
      __ jcc(Assembler::notEqual, dispatch);

      // We have the address of an on stack replacement routine in rax.
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(r13, rax);                    // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // rax is the OSR buffer; move it to the expected parameter location
      __ mov(j_rarg0, rax);

      // We use the j_rarg definitions here because parameter registers vary
      // across platforms and we are in the middle of setting up a call to the
      // OSR nmethod; using these registers avoids a collision. They are NOT
      // parameters of that call.

      const Register retaddr   = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();                          // remove frame anchor
      __ pop(retaddr);                     // get return address
      __ mov(rsp, sender_sp);              // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike 32-bit x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}


void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testl(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_i(rdx);
  __ cmpl(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ testptr(rax, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  __ pop_ptr(rdx);
  __ cmpptr(rdx, rax);
  __ jcc(j_not(cc), not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(rax);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(rbx);
  __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, Method::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1,
                      ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, Method::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1, ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

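// Illustration (not from the original source): the tableswitch operand layout
// assumed by the code below, per the JVM specification. All 32-bit operands
// are big-endian, hence the bswapl after each load.
//
//   bcp+0:         tableswitch opcode
//   bcp+1..:       0-3 padding bytes up to a 4-byte boundary
//   aligned+0:     default offset   (Address(rbx, 0))
//   aligned+4:     low              (Address(rbx, BytesPerInt))
//   aligned+8:     high             (Address(rbx, 2 * BytesPerInt))
//   aligned+12..:  (high - low + 1) jump offsets, indexed by key - low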
void TemplateTable::tableswitch() {
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx);
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

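// Illustration (not from the original source): the lookupswitch operand layout
// consumed by fast_linearswitch and fast_binaryswitch below. After the aligned
// default offset comes npairs, then npairs of {match, offset} pairs; each pair
// is 8 bytes, which is why the tables are indexed with Address::times_8.
//
//   aligned+0:   default offset
//   aligned+4:   npairs
//   aligned+8..: npairs x { 4-byte match, 4-byte jump offset }, big-endian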
void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andptr(rbx, -BytesPerInt);
  // set counter
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
  __ jcc(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0));
  __ jmp(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx);
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
}

void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation
  const Register key   = rax; // already set (tosca)
  const Register array = rbx;
  const Register i     = rcx;
  const Register j     = rdx;
  const Register h     = rdi;
  const Register temp  = rsi;

  // Find array start
  __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                          // get rid of this
                                          // instruction (change
                                          // offsets below)
  __ andptr(array, -BytesPerInt);

  // Initialize i & j
  __ xorl(i, i);                            // i = 0;
  __ movl(j, Address(array, -BytesPerInt)); // j = length(array);

  // Convert j into native byte ordering
  __ bswapl(j);

  // And start
  Label entry;
  __ jmp(entry);

  // binary search loop
  {
    Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
    __ sarl(h, 1);                               // h = (i + j) >> 1;
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    // Convert array[h].match to native byte-ordering before compare
    __ movl(temp, Address(array, h, Address::times_8));
    __ bswapl(temp);
    __ cmpl(key, temp);
    // j = h if (key <  array[h].fast_match())
    __ cmovl(Assembler::less, j, h);
    // i = h if (key >= array[h].fast_match())
    __ cmovl(Assembler::greaterEqual, i, h);
    // while (i+1 < j)
    __ bind(entry);
    __ leal(h, Address(i, 1)); // i+1
    __ cmpl(h, j);             // i+1 < j
    __ jcc(Assembler::less, loop);
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  // Convert array[i].match to native byte-ordering before compare
  __ movl(temp, Address(array, i, Address::times_8));
  __ bswapl(temp);
  __ cmpl(key, temp);
  __ jcc(Assembler::notEqual, default_case);

  // entry found -> j = offset
  __ movl(j, Address(array, i, Address::times_8, BytesPerInt));
  __ profile_switch_case(i, key, array);
  __ bswapl(j);
  __ movl2ptr(j, j);
  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
  __ addptr(r13, j);
  __ dispatch_only(vtos);

  // default case -> j = default offset
  __ bind(default_case);
  __ profile_switch_default(i);
  __ movl(j, Address(array, -2 * BytesPerInt));
  __ bswapl(j);
  __ movl2ptr(j, j);
  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
  __ addptr(r13, j);
  __ dispatch_only(vtos);
}


void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ movptr(c_rarg1, aaddress(0));
    __ load_klass(rdi, c_rarg1);
    __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
    __ testl(rdi, JVM_ACC_HAS_FINALIZER);
    Label skip_register_finalizer;
    __ jcc(Assembler::zero, skip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);

    __ bind(skip_register_finalizer);
  }

  __ remove_activation(state, r13);
  __ jmp(r13);
}

// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs
// in order. Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt each other. ALSO reads &
// writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
// happen after the read float up to before the read. It's OK for
// non-volatile memory refs that happen before the volatile read to
// float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
// memory refs that happen BEFORE the write float down to after the
// write. It's OK for non-volatile memory refs that happen after the
// volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.
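//
// Illustrative example (not from the original source), in Java:
//
//   volatile boolean flag;  int data;
//   writer: data = 42; flag = true;          // volatile store to flag
//   reader: if (flag) { assert data == 42; } // volatile load of flag
//
// Requirement (3) keeps the store to data from sinking below the volatile
// store to flag; requirement (2) keeps the reader's load of data from
// floating above the volatile load of flag. The StoreLoad barrier issued
// after volatile stores (see putfield_or_static) covers the remaining
// volatile-store-volatile-load case of requirement (1).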
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
                                     order_constraint) {
  // Helper function to insert an is-volatile test and memory barrier
  if (os::is_MP()) { // Not needed on single CPU
    __ membar(order_constraint);
  }
}

void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register Rcache,
                                            Register index,
                                            size_t index_size) {
  const Register temp = rbx;
  assert_different_registers(Rcache, index, temp);

  Label resolved;
  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
  __ cmpl(temp, (int) bytecode());  // have we resolved this bytecode?
  __ jcc(Assembler::equal, resolved);

  // resolve first time through
  address entry;
  switch (bytecode()) {
  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokeinterface:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
    break;
  case Bytecodes::_invokehandle:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);
    break;
  case Bytecodes::_invokedynamic:
    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
    break;
  default:
    fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
    break;
  }
  __ movl(temp, (int) bytecode());
  __ call_VM(noreg, entry, temp);

  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  __ bind(resolved);
}

// The cache and index registers must be set before this is called.
void TemplateTable::load_field_cp_cache_entry(Register obj,
                                              Register cache,
                                              Register index,
                                              Register off,
                                              Register flags,
                                              bool is_static = false) {
  assert_different_registers(cache, index, flags, off);

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Field offset
  __ movptr(off, Address(cache, index, Address::times_ptr,
                         in_bytes(cp_base_offset +
                                  ConstantPoolCacheEntry::f2_offset())));
  // Flags
  __ movl(flags, Address(cache, index, Address::times_ptr,
                         in_bytes(cp_base_offset +
                                  ConstantPoolCacheEntry::flags_offset())));

  // klass overwrite register
  if (is_static) {
    __ movptr(obj, Address(cache, index, Address::times_ptr,
                           in_bytes(cp_base_offset +
                                    ConstantPoolCacheEntry::f1_offset())));
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movptr(obj, Address(obj, mirror_offset));
  }
}

void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal, /*unused*/
                                               bool is_invokedynamic) {
  // setup registers
  const Register cache = rcx;
  const Register index = rdx;
  assert_different_registers(method, flags);
  assert_different_registers(method, cache, index);
  assert_different_registers(itable_index, flags);
  assert_different_registers(itable_index, cache, index);
  // determine constant pool cache field offsets
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(
    ConstantPoolCache::base_offset() +
      ((byte_no == f2_byte)
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
  resolve_cache_and_index(byte_no, cache, index, index_size);
  __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));

  if (itable_index != noreg) {
    // pick up itable or appendix index from f2 also:
    __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
  }
  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
}

// Correct values of the cache and index registers are preserved.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
                                            bool is_static, bool has_tos) {
  // do the JVMTI work here to avoid disturbing the register state below
  // We use c_rarg registers here because we want to use the register used in
  // the call to the VM
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, rax);
    __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rax, rax);
    __ jcc(Assembler::zero, L1);

    __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);

    // cache entry pointer
    __ addptr(c_rarg2, in_bytes(ConstantPoolCache::base_offset()));
    __ shll(c_rarg3, LogBytesPerWord);
    __ addptr(c_rarg2, c_rarg3);
    if (is_static) {
      __ xorl(c_rarg1, c_rarg1); // NULL object reference
    } else {
      __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
      __ verify_oop(c_rarg1);
    }
    // c_rarg1: object pointer or NULL
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                       InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2, c_rarg3);
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ bind(L1);
  }
}

void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}

void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);

  const Register cache = rcx;
  const Register index = rdx;
  const Register obj   = c_rarg3;
  const Register off   = rbx;
  const Register flags = rax;
  const Register bc    = c_rarg3; // uses same reg as obj, so don't mix them

  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  jvmti_post_field_access(cache, index, is_static, false);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

  if (!is_static) {
    // obj is on the stack
    pop_and_check_object(obj);
  }

  const Address field(obj, off, Address::times_1);

  Label Done, notByte, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

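  // Sketch of the cp cache entry flags word relied on below (an assumption
  // about the layout; ConstantPoolCacheEntry is authoritative):
  //   [ tos_state | option bits (volatile, final, ...) | parameter size ]
  // Shifting right by tos_state_shift leaves just the type code, which the
  // chain of compares below dispatches on (btos, atos, itos, ...).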
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  assert(btos == 0, "change code, btos != 0");

  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
  __ jcc(Assembler::notZero, notByte);
  // btos
  __ load_signed_byte(rax, field);
  __ push(btos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notByte);
  __ cmpl(flags, atos);
  __ jcc(Assembler::notEqual, notObj);
  // atos
  __ load_heap_oop(rax, field);
  __ push(atos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notObj);
  __ cmpl(flags, itos);
  __ jcc(Assembler::notEqual, notInt);
  // itos
  __ movl(rax, field);
  __ push(itos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notInt);
  __ cmpl(flags, ctos);
  __ jcc(Assembler::notEqual, notChar);
  // ctos
  __ load_unsigned_short(rax, field);
  __ push(ctos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notChar);
  __ cmpl(flags, stos);
  __ jcc(Assembler::notEqual, notShort);
  // stos
  __ load_signed_short(rax, field);
  __ push(stos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notShort);
  __ cmpl(flags, ltos);
  __ jcc(Assembler::notEqual, notLong);
  // ltos
  __ movq(rax, field);
  __ push(ltos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notLong);
  __ cmpl(flags, ftos);
  __ jcc(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, field);
  __ push(ftos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
  }
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  __ cmpl(flags, dtos);
  __ jcc(Assembler::notEqual, notDouble);
#endif
  // dtos
  __ movdbl(xmm0, field);
  __ push(dtos);
  // Rewrite bytecode to be faster
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
  }
#ifdef ASSERT
  __ jmp(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);
  // [jk] not needed currently
  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
  //                                              Assembler::LoadStore));
}


void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

// The cache and index registers are expected to be set before this is called.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
  transition(vtos, vtos);

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, rax);
    __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ testl(rax, rax);
    __ jcc(Assembler::zero, L1);

    __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1);

    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ xorl(c_rarg1, c_rarg1);
    } else {
      // Life is harder. The stack holds the value on top, followed by
      // the object. We don't know the size of the value, though; it
      // could be one or two words depending on its type. As a result,
      // we must find the type to determine where the object is.
      __ movl(c_rarg3, Address(c_rarg2, rscratch1,
                               Address::times_8,
                               in_bytes(cp_base_offset +
                                        ConstantPoolCacheEntry::flags_offset())));
      __ shrl(c_rarg3, ConstantPoolCacheEntry::tos_state_shift);
      // Make sure we don't need to mask rcx after the above shift
      ConstantPoolCacheEntry::verify_tos_state_shift();
      __ movptr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
      __ cmpl(c_rarg3, ltos);
      __ cmovptr(Assembler::equal,
                 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
      __ cmpl(c_rarg3, dtos);
      __ cmovptr(Assembler::equal,
                 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
    }
    // cache entry pointer
    __ addptr(c_rarg2, in_bytes(cp_base_offset));
    __ shll(rscratch1, LogBytesPerWord);
    __ addptr(c_rarg2, rscratch1);
    // object (tos)
    __ mov(c_rarg3, rsp);
    // c_rarg1: object pointer set up above (NULL if static)
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               c_rarg1, c_rarg2, c_rarg3);
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ bind(L1);
  }
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);

  const Register cache = rcx;
  const Register index = rdx;
  const Register obj   = rcx;
  const Register off   = rbx;
  const Register flags = rax;
  const Register bc    = c_rarg3;

  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  jvmti_post_field_mod(cache, index, is_static);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

  // [jk] not needed currently
  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
  //                                              Assembler::StoreStore));

  Label notVolatile, Done;
  __ movl(rdx, flags);
  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  __ andl(rdx, 0x1);

  // field address
  const Address field(obj, off, Address::times_1);

  Label notByte, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);

  assert(btos == 0, "change code, btos != 0");
  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
  __ jcc(Assembler::notZero, notByte);

  // btos
  {
    __ pop(btos);
    if (!is_static) pop_and_check_object(obj);
    __ movb(field, rax);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notByte);
  __ cmpl(flags, atos);
  __ jcc(Assembler::notEqual, notObj);

  // atos
  {
    __ pop(atos);
    if (!is_static) pop_and_check_object(obj);
    // Store into the field
    do_oop_store(_masm, field, rax, _bs->kind(), false);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notObj);
  __ cmpl(flags, itos);
  __ jcc(Assembler::notEqual, notInt);

  // itos
  {
    __ pop(itos);
    if (!is_static) pop_and_check_object(obj);
    __ movl(field, rax);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notInt);
  __ cmpl(flags, ctos);
  __ jcc(Assembler::notEqual, notChar);

  // ctos
  {
    __ pop(ctos);
    if (!is_static) pop_and_check_object(obj);
    __ movw(field, rax);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notChar);
  __ cmpl(flags, stos);
  __ jcc(Assembler::notEqual, notShort);

  // stos
  {
    __ pop(stos);
    if (!is_static) pop_and_check_object(obj);
    __ movw(field, rax);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notShort);
  __ cmpl(flags, ltos);
  __ jcc(Assembler::notEqual, notLong);

  // ltos
  {
    __ pop(ltos);
    if (!is_static) pop_and_check_object(obj);
    __ movq(field, rax);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notLong);
  __ cmpl(flags, ftos);
  __ jcc(Assembler::notEqual, notFloat);

  // ftos
  {
    __ pop(ftos);
    if (!is_static) pop_and_check_object(obj);
    __ movflt(field, xmm0);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
    }
    __ jmp(Done);
  }

  __ bind(notFloat);
#ifdef ASSERT
  __ cmpl(flags, dtos);
  __ jcc(Assembler::notEqual, notDouble);
#endif

  // dtos
  {
    __ pop(dtos);
    if (!is_static) pop_and_check_object(obj);
    __ movdbl(field, xmm0);
    if (!is_static) {
      patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
    }
  }

#ifdef ASSERT
  __ jmp(Done);

  __ bind(notDouble);
  __ stop("Bad state");
#endif

  __ bind(Done);

  // Check for volatile store
  __ testl(rdx, rdx);
  __ jcc(Assembler::zero, notVolatile);
  volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                               Assembler::StoreStore));
  __ bind(notVolatile);
}

void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::jvmti_post_fast_field_mod() {
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L2;
    __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, L2);
    __ pop_ptr(rbx);                  // copy the object pointer from tos
    __ verify_oop(rbx);
    __ push_ptr(rbx);                 // put the object pointer back on tos
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {             // load values into the jvalue object
    case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(rax); break;
    case Bytecodes::_fast_dputfield: __ push_d(); break;
    case Bytecodes::_fast_fputfield: __ push_f(); break;
    case Bytecodes::_fast_lputfield: __ push_l(rax); break;

    default:
      ShouldNotReachHere();
    }
    __ mov(c_rarg3, rsp);             // points to jvalue on the stack
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
    __ verify_oop(rbx);
    // rbx: object pointer copied above
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               rbx, c_rarg2, c_rarg3);

    switch (bytecode()) {             // restore tos values
    case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
    case Bytecodes::_fast_dputfield: __ pop_d(); break;
    case Bytecodes::_fast_fputfield: __ pop_f(); break;
    case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
    }
    __ bind(L2);
  }
}

void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);

  ByteSize base = ConstantPoolCache::base_offset();

  jvmti_post_fast_field_mod();

  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rbx, 1);

  // test for volatile with rdx
  __ movl(rdx, Address(rcx, rbx, Address::times_8,
                       in_bytes(base +
                                ConstantPoolCacheEntry::flags_offset())));

  // replace index with field offset from cache entry
  __ movptr(rbx, Address(rcx, rbx, Address::times_8,
                         in_bytes(base + ConstantPoolCacheEntry::f2_offset())));

  // [jk] not needed currently
  // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
  //                                              Assembler::StoreStore));

  Label notVolatile;
  __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  __ andl(rdx, 0x1);

  // Get object from stack
  pop_and_check_object(rcx);

  // field address
  const Address field(rcx, rbx, Address::times_1);

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_aputfield:
    do_oop_store(_masm, field, rax, _bs->kind(), false);
    break;
  case Bytecodes::_fast_lputfield:
    __ movq(field, rax);
    break;
  case Bytecodes::_fast_iputfield:
    __ movl(field, rax);
    break;
  case Bytecodes::_fast_bputfield:
    __ movb(field, rax);
    break;
  case Bytecodes::_fast_sputfield:
    // fall through
  case Bytecodes::_fast_cputfield:
    __ movw(field, rax);
    break;
  case Bytecodes::_fast_fputfield:
    __ movflt(field, xmm0);
    break;
  case Bytecodes::_fast_dputfield:
    __ movdbl(field, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }

  // Check for volatile store
  __ testl(rdx, rdx);
  __ jcc(Assembler::zero, notVolatile);
  volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                               Assembler::StoreStore));
  __ bind(notVolatile);
}


void TemplateTable::fast_accessfield(TosState state) {
  transition(atos, state);

  // Do the JVMTI work here to avoid disturbing the register state below
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rcx, rcx);
    __ jcc(Assembler::zero, L1);
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
    __ verify_oop(rax);
    __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
    __ mov(c_rarg1, rax);
    // c_rarg1: object pointer copied above
    // c_rarg2: cache entry pointer
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2);
    __ pop_ptr(rax); // restore object pointer
    __ bind(L1);
  }

  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rbx, 1);
  // replace index with field offset from cache entry
  // [jk] not needed currently
  // if (os::is_MP()) {
  //   __ movl(rdx, Address(rcx, rbx, Address::times_8,
  //                        in_bytes(ConstantPoolCache::base_offset() +
  //                                 ConstantPoolCacheEntry::flags_offset())));
  //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  //   __ andl(rdx, 0x1);
  // }
  __ movptr(rbx, Address(rcx, rbx, Address::times_8,
                         in_bytes(ConstantPoolCache::base_offset() +
                                  ConstantPoolCacheEntry::f2_offset())));

  // rax: object
  __ verify_oop(rax);
  __ null_check(rax);
  Address field(rax, rbx, Address::times_1);

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_agetfield:
    __ load_heap_oop(rax, field);
    __ verify_oop(rax);
    break;
  case Bytecodes::_fast_lgetfield:
    __ movq(rax, field);
    break;
  case Bytecodes::_fast_igetfield:
    __ movl(rax, field);
    break;
  case Bytecodes::_fast_bgetfield:
    __ movsbl(rax, field);
    break;
  case Bytecodes::_fast_sgetfield:
    __ load_signed_short(rax, field);
    break;
  case Bytecodes::_fast_cgetfield:
    __ load_unsigned_short(rax, field);
    break;
  case Bytecodes::_fast_fgetfield:
    __ movflt(xmm0, field);
    break;
  case Bytecodes::_fast_dgetfield:
    __ movdbl(xmm0, field);
    break;
  default:
    ShouldNotReachHere();
  }
  // [jk] not needed currently
  // if (os::is_MP()) {
  //   Label notVolatile;
  //   __ testl(rdx, rdx);
  //   __ jcc(Assembler::zero, notVolatile);
  //   __ membar(Assembler::LoadLoad);
  //   __ bind(notVolatile);
  // }
}

void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);

  // get receiver
  __ movptr(rax, aaddress(0));
  // access constant pool cache
  __ get_cache_and_index_at_bcp(rcx, rdx, 2);
  __ movptr(rbx,
            Address(rcx, rdx, Address::times_8,
                    in_bytes(ConstantPoolCache::base_offset() +
                             ConstantPoolCacheEntry::f2_offset())));
  // make sure exception is reported in correct bcp range (getfield is
  // next instruction)
  __ increment(r13);
  __ null_check(rax);
  switch (state) {
  case itos:
    __ movl(rax, Address(rax, rbx, Address::times_1));
    break;
  case atos:
    __ load_heap_oop(rax, Address(rax, rbx, Address::times_1));
    __ verify_oop(rax);
    break;
  case ftos:
    __ movflt(xmm0, Address(rax, rbx, Address::times_1));
    break;
  default:
    ShouldNotReachHere();
  }

  // [jk] not needed currently
  // if (os::is_MP()) {
  //   Label notVolatile;
  //   __ movl(rdx, Address(rcx, rdx, Address::times_8,
  //                        in_bytes(ConstantPoolCache::base_offset() +
  //                                 ConstantPoolCacheEntry::flags_offset())));
  //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
  //   __ testl(rdx, 0x1);
  //   __ jcc(Assembler::zero, notVolatile);
  //   __ membar(Assembler::LoadLoad);
  //   __ bind(notVolatile);
  // }

  __ decrement(r13);
}



//-----------------------------------------------------------------------------
// Calls

void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}

void TemplateTable::prepare_invoke(int byte_no,
                                   Register method,  // linked method (or i-klass)
                                   Register index,   // itable index, MethodType, etc.
                                   Register recv,    // if caller wants to see it
                                   Register flags    // if caller wants to test it
                                   ) {
  // determine flags
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (recv  != noreg);
  const bool save_flags         = (flags != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
  assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
  assert(flags == noreg || flags == rdx, "");
  assert(recv  == noreg || recv  == rcx, "");

  // setup registers & access constant pool cache
  if (recv  == noreg)  recv  = rcx;
  if (flags == noreg)  flags = rdx;
  assert_different_registers(method, index, recv, flags);

  // save 'interpreter return address'
  __ save_bcp();

  load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);

  // maybe push appendix to arguments (just before return address)
  if (is_invokedynamic || is_invokehandle) {
    Label L_no_push;
    __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
    __ jcc(Assembler::zero, L_no_push);
    // Push the appendix as a trailing parameter.
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ push(rbx);
    __ mov(rbx, index);
    assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
    __ load_resolved_reference_at_index(index, rbx);
    __ pop(rbx);
    __ push(index);  // push appendix (MethodType, CallSite, etc.)
    __ bind(L_no_push);
  }

  // load receiver if needed (after appendix is pushed so parameter size is correct)
  // Note: no return address pushed yet
  if (load_receiver) {
    __ movl(recv, flags);
    __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
    const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
    const int receiver_is_at_end      = -1;  // back off one slot to get receiver
    Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
    __ movptr(recv, recv_addr);
    __ verify_oop(recv);
  }

  if (save_flags) {
    __ movl(r13, flags);
  }

  // compute return type
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask flags after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
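
  // An assumption worth noting (see the template interpreter for the real
  // table): invoke_return_entry_table_for(code) is indexed by result TosState,
  // so the "return address" pushed below is roughly table[result_type]; the
  // callee's ordinary ret then resumes in an entry that knows where the
  // result lives.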
  // load return address
  {
    const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
    ExternalAddress table(table_addr);
    __ lea(rscratch1, table);
    __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
  }

  // push return address
  __ push(flags);

  // Restore flags value from the constant pool cache, and restore the
  // bytecode pointer (r13) for later null checks.
  if (save_flags) {
    __ movl(flags, r13);
    __ restore_bcp();
  }
}


void TemplateTable::invokevirtual_helper(Register index,
                                         Register recv,
                                         Register flags) {
  // Uses temporary registers rax, rdx
  assert_different_registers(index, recv, rax, rdx);
  assert(index == rbx, "");
  assert(recv  == rcx, "");

  // Test for an invoke of a final method
  Label notFinal;
  __ movl(rax, flags);
  __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
  __ jcc(Assembler::zero, notFinal);

  const Register method = index;  // method must be rbx
  assert(method == rbx,
         "Method* must be rbx for interpreter calling convention");

  // do the call - the index is actually the method to call
  // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*

  // It's final, need a null check here!
  __ null_check(recv);

  // profile this call
  __ profile_final_call(rax);
  __ profile_arguments_type(rax, method, r13, true);

  __ jump_from_interpreted(method, rax);

  __ bind(notFinal);

  // get receiver klass
  __ null_check(recv, oopDesc::klass_offset_in_bytes());
  __ load_klass(rax, recv);

  // profile this call
  __ profile_virtual_call(rax, r14, rdx);

  // get target Method* & entry point
  __ lookup_virtual_method(rax, index, method);
  __ profile_arguments_type(rdx, method, r13, true);
  __ jump_from_interpreted(method, rdx);
}


void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  prepare_invoke(byte_no,
                 rbx,       // method or vtable index
                 noreg,     // unused itable index
                 rcx, rdx); // recv, flags

  // rbx: index
  // rcx: receiver
  // rdx: flags

  invokevirtual_helper(rbx, rcx, rdx);
}


void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
                 rcx);  // get receiver also for null check
  __ verify_oop(rcx);
  __ null_check(rcx);
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, r13, false);
  __ jump_from_interpreted(rbx, rax);
}


void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rbx);  // get f1 Method*
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, r13, false);
  __ jump_from_interpreted(rbx, rax);
}

void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");
  __ stop("fast_invokevfinal not used on amd64");
}

void TemplateTable::invokeinterface(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 itable index
                 rcx, rdx); // recv, flags

  // rax: interface klass (from f1)
  // rbx: itable index (from f2)
  // rcx: receiver
  // rdx: flags

  // Special case of invokeinterface called for a virtual method of
  // java.lang.Object. See cpCache.cpp for details.
  // This code isn't produced by javac, but could be produced by
  // another compliant Java compiler.
  Label notMethod;
  __ movl(r14, rdx);
  __ andl(r14, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
  __ jcc(Assembler::zero, notMethod);

  invokevirtual_helper(rbx, rcx, rdx);
  __ bind(notMethod);

  // Get receiver klass into rdx - also a null check
  __ restore_locals();  // restore r14
  __ null_check(rcx, oopDesc::klass_offset_in_bytes());
  __ load_klass(rdx, rcx);

  // profile this call
  __ profile_virtual_call(rdx, r13, r14);

  Label no_such_interface, no_such_method;

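  // Rough sketch of the itable scan the helper below performs (an assumption;
  // see MacroAssembler::lookup_interface_method for the real code):
  //   for (itable_entry* e = itable_of(rdx); ; e++) {
  //     if (e->interface() == NULL) goto no_such_interface;
  //     if (e->interface() == rax) { rbx = e->methods()[itable_index]; break; }
  //   }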
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rdx, rax, rbx,
                             // outputs: method, scan temp. reg
                             rbx, r13,
                             no_such_interface);

  // rbx: Method* to call
  // rcx: receiver
  // Check for abstract method error
  // Note: This should be done more efficiently via a throw_abstract_method_error
  // interpreter entry point and a conditional jump to it in case of a null
  // method.
  __ testptr(rbx, rbx);
  __ jcc(Assembler::zero, no_such_method);

  __ profile_arguments_type(rdx, rbx, r13, true);

  // do the call
  // rcx: receiver
  // rbx: Method*
  __ jump_from_interpreted(rbx, rdx);
  __ should_not_reach_here();

  // exception handling code follows...
  // note: must restore interpreter registers to canonical
  // state for exception handling to work correctly!

  __ bind(no_such_method);
  // throw exception
  __ pop(rbx);           // pop return address (pushed by prepare_invoke)
  __ restore_bcp();      // r13 must be correct for exception handler (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  __ bind(no_such_interface);
  // throw exception
  __ pop(rbx);           // pop return address (pushed by prepare_invoke)
  __ restore_bcp();      // r13 must be correct for exception handler (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::throw_IncompatibleClassChangeError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
}


void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");
  const Register rbx_method = rbx;
  const Register rax_mtype  = rax;
  const Register rcx_recv   = rcx;
  const Register rdx_flags  = rdx;

  prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
  __ verify_method_ptr(rbx_method);
  __ verify_oop(rcx_recv);
  __ null_check(rcx_recv);

  // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
  // rbx: MH.invokeExact_MT method (from f2)

  // Note: rax_mtype is already pushed (if necessary) by prepare_invoke

  // FIXME: profile the LambdaForm also
  __ profile_final_call(rax);
  __ profile_arguments_type(rdx, rbx_method, r13, true);

  __ jump_from_interpreted(rbx_method, rdx);
}


void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  const Register rbx_method   = rbx;
  const Register rax_callsite = rax;

  prepare_invoke(byte_no, rbx_method, rax_callsite);

  // rax: CallSite object (from cpool->resolved_references[f1])
  // rbx: MH.linkToCallSite method (from f2)

  // Note: rax_callsite is already pushed by prepare_invoke

  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(r13);
  __ profile_arguments_type(rdx, rbx_method, r13, false);

  __ verify_oop(rax_callsite);

  __ jump_from_interpreted(rbx_method, rdx);
}
3220 |
|
3221 |
|
//-----------------------------------------------------------------------------
// Allocation

void TemplateTable::_new() {
  transition(vtos, atos);
  __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
  Label slow_case;
  Label done;
  Label initialize_header;
  Label initialize_object; // including clearing the fields
  Label allocate_shared;

  __ get_cpool_and_tags(rsi, rax);
  // Make sure the class we're about to instantiate has been resolved.
  // This is done before loading InstanceKlass to be consistent with the
  // order in which the ConstantPool is updated (see ConstantPool::klass_at_put).
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  __ cmpb(Address(rax, rdx, Address::times_1, tags_offset),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, slow_case);

  // get InstanceKlass
  __ movptr(rsi, Address(rsi, rdx,
            Address::times_8, sizeof(ConstantPool)));

  // make sure klass is initialized & doesn't have a finalizer
  // make sure klass is fully initialized
  __ cmpb(Address(rsi,
                  InstanceKlass::init_state_offset()),
          InstanceKlass::fully_initialized);
  __ jcc(Assembler::notEqual, slow_case);

  // get instance_size in InstanceKlass (scaled to a count of bytes)
  __ movl(rdx,
          Address(rsi,
                  Klass::layout_helper_offset()));
  // test to see if it has a finalizer or is malformed in some way
  __ testl(rdx, Klass::_lh_instance_slow_path_bit);
  __ jcc(Assembler::notZero, slow_case);

  // Allocate the instance:
  //  1) Try to allocate in the TLAB.
  //  2) If that fails and the object is large, allocate in the shared Eden.
  //  3) If the above fails (or is not applicable), go to the slow case
  //     (which creates a new TLAB, etc.).

  const bool allow_shared_alloc =
    Universe::heap()->supports_inline_contig_alloc();

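  // The TLAB path below is simple bump-pointer allocation: rax holds the
  // current thread-local top, rbx the proposed new top; no atomics are
  // needed since the TLAB is private to the allocating thread.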
  if (UseTLAB) {
    __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
    __ lea(rbx, Address(rax, rdx, Address::times_1));
    __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
    __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
    if (ZeroTLAB) {
      // the fields have been already cleared
      __ jmp(initialize_header);
    } else {
      // initialize both the header and fields
      __ jmp(initialize_object);
    }
  }

  // Allocation in the shared Eden, if allowed.
  //
  // rdx: instance size in bytes
  if (allow_shared_alloc) {
    __ bind(allocate_shared);

    ExternalAddress top((address)Universe::heap()->top_addr());
    ExternalAddress end((address)Universe::heap()->end_addr());

    const Register RtopAddr = rscratch1;
    const Register RendAddr = rscratch2;

    __ lea(RtopAddr, top);
    __ lea(RendAddr, end);
    __ movptr(rax, Address(RtopAddr, 0));

    // For retries rax gets set by cmpxchgq
    Label retry;
    __ bind(retry);
    __ lea(rbx, Address(rax, rdx, Address::times_1));
    __ cmpptr(rbx, Address(RendAddr, 0));
    __ jcc(Assembler::above, slow_case);

    // Compare rax with the current top address; if they are still equal,
    // store the new top address (rbx) through the top pointer. Sets ZF if
    // they were equal, clears it otherwise. Uses the lock prefix for
    // atomicity on MPs.
    //
    // rax: object begin
    // rbx: object end
    // rdx: instance size in bytes
    if (os::is_MP()) {
      __ lock();
    }
    __ cmpxchgptr(rbx, Address(RtopAddr, 0));

    // if someone beat us on the allocation, try again, otherwise continue
    __ jcc(Assembler::notEqual, retry);

    __ incr_allocated_bytes(r15_thread, rdx, 0);
  }

  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
    // The object fields are initialized before the header. If the object
    // size is zero, go directly to the header initialization.
    __ bind(initialize_object);
    __ decrementl(rdx, sizeof(oopDesc));
    __ jcc(Assembler::zero, initialize_header);

    // Initialize object fields
    __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
    __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop
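    // The loop below zeroes the instance one 8-byte word per iteration,
    // walking rdx from the last word of the object down to the first word
    // past the header.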
    {
      Label loop;
      __ bind(loop);
      __ movq(Address(rax, rdx, Address::times_8,
                      sizeof(oopDesc) - oopSize),
              rcx);
      __ decrementl(rdx);
      __ jcc(Assembler::notZero, loop);
    }

    // initialize object header only.
    __ bind(initialize_header);
    if (UseBiasedLocking) {
      __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset()));
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
    } else {
      __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
                (intptr_t) markOopDesc::prototype()); // header (address 0x1)
    }
    __ xorl(rcx, rcx);            // use zero reg to clear memory (shorter code)
    __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops
    __ store_klass(rax, rsi);     // store klass last

    {
      SkipIfEqual skip(_masm, &DTraceAllocProbes, false);
      // Trigger dtrace event for fastpath
      __ push(atos); // save the return value
      __ call_VM_leaf(
           CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
      __ pop(atos); // restore the return value
    }
    __ jmp(done);
  }

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(c_rarg1);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
  __ verify_oop(rax);

  // continue
  __ bind(done);
}

void TemplateTable::newarray() {
  transition(itos, atos);
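  // c_rarg1: element type code (a BasicType constant, from the byte at bcp+1)
  // c_rarg2: array length (the itos value, currently in rax)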
  __ load_unsigned_byte(c_rarg1, at_bcp(1));
  __ movl(c_rarg2, rax);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          c_rarg1, c_rarg2);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
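  // c_rarg1: constant pool; c_rarg2: CP index of the element class (u2 at
  // bcp+1); c_rarg3: array length (the itos value, currently in rax)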
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  __ movl(c_rarg3, rax);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          c_rarg1, c_rarg2, c_rarg3);
}

void TemplateTable::arraylength() {
  transition(atos, itos);
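  // The null check typically folds into the length load: a fault on a load
  // at a small offset from a NULL oop is turned into the implicit null
  // check, so no explicit test is emitted.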
  __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
  __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
}

void TemplateTable::checkcast() {
  transition(atos, atos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ testptr(rax, rax); // object is in rax
  __ jcc(Assembler::zero, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
  // See if bytecode has already been quicked
  __ cmpb(Address(rdx, rbx,
                  Address::times_1,
                  Array<u1>::base_offset_in_bytes()),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::equal, quicked);
  __ push(atos); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(rax, r15_thread);
  __ pop_ptr(rdx); // restore receiver
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rbx
  __ bind(quicked);
  __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
  __ movptr(rax, Address(rcx, rbx,
                         Address::times_8, sizeof(ConstantPool)));

  __ bind(resolved);
  __ load_klass(rbx, rdx);

  // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  __ push_ptr(rdx);
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(rax, rdx); // Restore object from rdx

  // Collect counts on whether this check-cast sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null); // same as 'done'
  }
  __ bind(done);
}

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
  // See if bytecode has already been quicked
  __ cmpb(Address(rdx, rbx,
                  Address::times_1,
                  Array<u1>::base_offset_in_bytes()),
          JVM_CONSTANT_Class);
  __ jcc(Assembler::equal, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result
  __ get_vm_result_2(rax, r15_thread);
  __ pop_ptr(rdx); // restore receiver
  __ verify_oop(rdx);
  __ load_klass(rdx, rdx);
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rdx
  __ bind(quicked);
  __ load_klass(rdx, rax);
  __ movptr(rax, Address(rcx, rbx,
                         Address::times_8, sizeof(ConstantPool)));

  __ bind(resolved);

  // Generate subtype check.  Blows rcx, rdi.
  // Superklass in rax.  Subklass in rdx.
  __ gen_subtype_check(rdx, ok_is_subtype);

  // Come here on failure
  __ xorl(rax, rax);
  __ jmpb(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ movl(rax, 1);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null); // same as 'done'
  }
  __ bind(done);
  // rax = 0: obj == NULL or obj is not an instance of the specified klass
  // rax = 1: obj != NULL and obj is an instance of the specified klass
}

//-----------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: we get here even when single-stepping, since jbug insists on
  // setting breakpoints at every bytecode even in single-step mode.

  transition(vtos, vtos);

  // get the unpatched bytecode
  __ get_method(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::get_original_bytecode_at),
             c_rarg1, r13);
  __ mov(rbx, rax);

  // post the breakpoint event
  __ get_method(c_rarg1);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             c_rarg1, r13);

  // complete the execution of original bytecode
  __ dispatch_only_normal(vtos);
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(rax);
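  // The exception oop is in rax (atos); the shared throw-exception entry
  // takes care of unwinding and dispatching to the handler.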
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
//       in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- rsp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rbp    ] <--- rbp
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
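  // Each monitor entry is a BasicObjectLock: the displaced mark word
  // (BasicLock) followed by a pointer to the locked object.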

  Label allocated;

  // initialize entry pointer
  __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL

  // find a free slot in the monitor block (result in c_rarg1)
  {
    Label entry, loop, exit;
    __ movptr(c_rarg3, monitor_block_top); // points to current entry,
                                           // starting with top-most entry
    __ lea(c_rarg2, monitor_block_bot);    // points to word before bottom
                                           // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is used
    __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
    // if not used then remember entry in c_rarg1
    __ cmov(Assembler::equal, c_rarg1, c_rarg3);
    // check if current entry is for same object
    __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jccb(Assembler::equal, exit);
    // otherwise advance to next entry
    __ addptr(c_rarg3, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(c_rarg3, c_rarg2);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
    __ bind(exit);
  }

  __ testptr(c_rarg1, c_rarg1);          // check if a slot has been found
  __ jcc(Assembler::notZero, allocated); // if found, continue with that one

  // allocate one if there's no free slot
  {
    Label entry, loop;
    // 1. compute new pointers             // rsp: old expression stack top
    __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
    __ subptr(rsp, entry_size);            // move expression stack top
    __ subptr(c_rarg1, entry_size);        // move expression stack bottom
    __ mov(c_rarg3, rsp);                  // set start value for copy loop
    __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
    __ jmp(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
                                                      // word from old location
    __ movptr(Address(c_rarg3, 0), c_rarg2);          // and store it at new location
    __ addptr(c_rarg3, wordSize);                     // advance to next word
    __ bind(entry);
    __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
    __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
  }

  // call run-time routine
  // c_rarg1: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for asynchronous exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(r13);

  // store object
  __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ lock_object(c_rarg1);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp(); // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);
}


void TemplateTable::monitorexit() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Label found;

  // find matching slot
  {
    Label entry, loop;
    __ movptr(c_rarg1, monitor_block_top); // points to current entry,
                                           // starting with top-most entry
    __ lea(c_rarg2, monitor_block_bot);    // points to word before bottom
                                           // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jcc(Assembler::equal, found);
    // otherwise advance to next entry
    __ addptr(c_rarg1, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(c_rarg1, c_rarg2);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
  }

  // Error handling: the unlocking was not block-structured.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  // c_rarg1: points to monitor entry
  __ bind(found);
  __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
  __ unlock_object(c_rarg1);
  __ pop_ptr(rax); // discard object
}


// Wide instructions
void TemplateTable::wide() {
  transition(vtos, vtos);
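  // Fetch the bytecode following the wide prefix and dispatch through the
  // table of wide entry points (one entry per bytecode).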
  __ load_unsigned_byte(rbx, at_bcp(1));
  __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point));
  __ jmp(Address(rscratch1, rbx, Address::times_8));
  // Note: the r13 increment step is part of the individual wide bytecode
  //       implementations
}

// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);
  __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  // first_addr = last_addr + (ndims - 1) * wordSize
  __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
  call_VM(rax,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
          c_rarg1);
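  // Reload the dimension count (rax now holds the new array) and pop the
  // dimension words off the expression stack.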
  __ load_unsigned_byte(rbx, at_bcp(3));
  __ lea(rsp, Address(rsp, rbx, Address::times_8));
}
#endif // !CC_INTERP