|
/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
|
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

#ifndef CC_INTERP

// Global Register Names
static const Register rbcp    = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
|
//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows negative)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}
|
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ lea(rarg, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}
|
address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}
|
address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    // ExternalAddress can't take NULL because external_word_Relocation
    // would assert, so pass NULL_WORD directly in that case.
    if (message != NULL) {
      __ lea(rarg2, ExternalAddress((address)message));
    } else {
      __ movptr(rarg2, NULL_WORD);
    }
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}

|
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ dispatch_next(state);
  return entry;
}

|
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  return entry;
}

|
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.
  if (UseJVMCICompiler) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}
|
address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                 // return from result handler
  return entry;
}
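
// Worked example for the T_BOOLEAN case above: a JNI implementation of a
// method declared 'boolean' may leave any byte value in rax, and c2bool
// normalizes it. A rough C-level sketch of that normalization
// (illustrative only, not the generated assembly):
//
//   rax = ((rax & 0xFF) != 0) ? 1 : 0;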
|
address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//

|
// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
// so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO
  // depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                                   MethodCounters::backedge_counter_offset() +
                                   InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
                            MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-NULL only for interpreted methods, so
    // profile_method != NULL implies !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}
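
// A rough sketch of the non-tiered counter test above (illustrative
// pseudocode only; the exact bit layout lives in InvocationCounter):
//
//   inv = invocation_counter + count_increment;              // stored back
//   sum = inv + (backedge_counter & count_mask_value);
//   if (sum >= interpreter_invocation_limit) goto overflow;  // jcc aboveEqual
//
// The counters are not reset on this path, so once the limit is crossed
// the comparison keeps succeeding -- the "sticky" overflow test that the
// header comment above the function refers to.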
|
void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx     - method
  // rdx     - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp     - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset)); // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);
}
|
// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are always pushed as well (this wasn't
// obvious in generate_fixed_frame), so the guard should work for them
// too.
//
// Args:
//    rdx: number of additional locals this frame needs (what we must check)
//    rbx: Method*
//
// Kills:
//    rax
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom). be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize); // convert locals count to bytes
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                        (StackRedPages+StackYellowPages);

  // add in the red and yellow zone sizes
  __ addptr(rax, max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi)); // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}
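
// Roughly, the large-frame test generated above amounts to (illustrative
// pseudocode; register details and the 32-bit thread reload are omitted):
//
//   limit = thread->stack_base() - thread->stack_size()
//           + max_pages * page_size            // red/yellow/shadow zones
//           + locals_in_bytes + overhead_size;
//   if (rsp <= limit) jump to the shared throw_StackOverflowError stub;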
|
// Allocate monitor and lock method (asm interpreter)
//
// Args:
//    rbx: Method*
//    r14/rdi: locals
//
// Kills:
//    rax
//    c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//    rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax,
                           ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size);        // add space for a monitor entry
  __ movptr(monitor_block_top, rsp); // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}
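
// Conceptually, lock_method() above does the following (illustrative
// sketch; BasicObjectLock is the lock/obj pair used by the interpreter's
// monitor block):
//
//   oop sync_obj = is_static ? pool_holder->java_mirror() : local(0);
//   rsp -= sizeof(BasicObjectLock);          // carve a monitor slot
//   monitor_block_top = rsp;                 // frame now owns one monitor
//   ((BasicObjectLock*) rsp)->set_obj(sync_obj);
//   lock_object(rsp);                        // fast path or slow VM call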
|
// Generate a fixed interpreter frame. The setup is identical for
// interpreted methods and for native methods, hence the shared code.
//
// Args:
//    rax: return address
//    rbx: Method*
//    r14/rdi: pointer to locals
//    r13/rsi: sender sp
//    rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  __ push(rax);            // save return address
  __ enter();              // save old & set new rbp
  __ push(rbcp);           // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));    // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);            // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);          // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx);      // set constant pool cache
  __ push(rlocals);  // set locals pointer
  if (native_call) {
    __ push(0);      // no bcp
  } else {
    __ push(rbcp);   // set bcp
  }
  __ push(0);        // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}
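
// The pushes in generate_fixed_frame above build a frame that looks
// roughly like this (stack grows downward; a sketch, not an exact layout
// -- frame_x86.hpp is authoritative):
//
//   [ return address        ]
//   [ saved rbp             ]  <-- rbp
//   [ sender sp             ]
//   [ last_sp (NULL)        ]
//   [ Method*               ]
//   [ mdp or 0              ]
//   [ ConstantPoolCache*    ]
//   [ locals pointer        ]
//   [ bcp (0 for natives)   ]
//   [ expr stack bottom ptr ]  <-- rsp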
|
// End of helpers
|
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    NOT_LP64(__ push(rsi));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    NOT_LP64(__ pop(rsi));  // get sender sp
    __ pop(rdi);            // get return address
    __ mov(rsp, sender_sp); // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}
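
// At the bytecode level the G1 fast path above corresponds roughly to
// (illustrative sketch, not generated code):
//
//   aload_0                    // receiver; null check jumps to slow path
//   getfield referent          // load_heap_oop + SATB pre-barrier
//   areturn                    // pop to sender sp and return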
|
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax); // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize);
  __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here. The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method); // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
              t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
  __ get_thread(thread);
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
  __ movptr(Address(rsp, 0), t);

  // set_last_Java_frame_before_call
  // It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
  __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
                  Assembler::LoadLoad | Assembler::LoadStore |
                  Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }
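
  // A rough sketch of what serialize_memory amounts to (illustrative
  // only; see os::get_memory_serialize_page for the VM side):
  //
  //   volatile int* page = (volatile int*) os::get_memory_serialize_page();
  //   page[thread_specific_offset] = 1;  // plain store; if the VM has
  //   // protected the page, the store faults and the thread synchronizes
  //   // with the safepoint protocol in the fault handler.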
|
#ifndef _LP64
  if (AlwaysRestoreFPU) {
    // Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }


  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
            JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha(); // XXX only save smashed registers
#ifndef _LP64
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();
#else
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ mov(rsp, r12); // restore sp
    __ popa(); // XXX only restore smashed registers
    __ reinit_heapbase();
#endif // _LP64

    __ bind(no_reguard);
  }


  // The method register is junk from after the thread_in_native transition
  // until here. Also can't call_VM until the bcp has been
  // restored. Need bcp for throwing exception below so get it now.
  __ get_method(method);

  // restore to have legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));   // get codebase

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // monitor expected in c_rarg1 for slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                                                  InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in edx:eax, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();   // remove frame anchor
  __ pop(rdi);  // get return address
  __ mov(rsp, t); // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}
|
1309 // |
|
1310 // Generic interpreted method entry to (asm) interpreter |
|
1311 // |
|
1312 address InterpreterGenerator::generate_normal_entry(bool synchronized) { |
|
1313 // determine code generation flags |
|
1314 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods; |
|
1315 |
|
1316 // ebx: Method* |
|
1317 // rbcp: sender sp |
|
1318 address entry_point = __ pc(); |
|
1319 |
|
1320 const Address constMethod(rbx, Method::const_offset()); |
|
1321 const Address access_flags(rbx, Method::access_flags_offset()); |
|
1322 const Address size_of_parameters(rdx, |
|
1323 ConstMethod::size_of_parameters_offset()); |
|
1324 const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset()); |
|
1325 |
|
1326 |
|
1327 // get parameter size (always needed) |
|
1328 __ movptr(rdx, constMethod); |
|
1329 __ load_unsigned_short(rcx, size_of_parameters); |
|
1330 |
|
1331 // rbx: Method* |
|
1332 // rcx: size of parameters |
|
1333 // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i ) |
|
1334 |
|
1335 __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words |
|
1336 __ subl(rdx, rcx); // rdx = no. of additional locals |
|
1337 |
|
1338 // YYY |
|
1339 // __ incrementl(rdx); |
|
1340 // __ andl(rdx, -2); |
|
1341 |
|
1342 // see if we've got enough room on the stack for locals plus overhead. |
|
1343 generate_stack_overflow_check(); |
|
1344 |
|
1345 // get return address |
|
1346 __ pop(rax); |
|
1347 |
|
1348 // compute beginning of parameters |
|
1349 __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); |
|
1350 |
|
1351 // rdx - # of additional locals |
|
1352 // allocate space for locals |
|
1353 // explicitly initialize locals |
|
1354 { |
|
1355 Label exit, loop; |
|
1356 __ testl(rdx, rdx); |
|
1357 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 |
|
1358 __ bind(loop); |
|
1359 __ push((int) NULL_WORD); // initialize local variables |
|
1360 __ decrementl(rdx); // until everything initialized |
|
1361 __ jcc(Assembler::greater, loop); |
|
1362 __ bind(exit); |
|
1363 } |
|
1364 |
|
1365 // initialize fixed part of activation frame |
|
1366 generate_fixed_frame(false); |
|
1367 |
|
1368 // make sure method is not native & not abstract |
|
1369 #ifdef ASSERT |
|
1370 __ movl(rax, access_flags); |
|
1371 { |
|
1372 Label L; |
|
1373 __ testl(rax, JVM_ACC_NATIVE); |
|
1374 __ jcc(Assembler::zero, L); |
|
1375 __ stop("tried to execute native method as non-native"); |
|
1376 __ bind(L); |
|
1377 } |
|
1378 { |
|
1379 Label L; |
|
1380 __ testl(rax, JVM_ACC_ABSTRACT); |
|
1381 __ jcc(Assembler::zero, L); |
|
1382 __ stop("tried to execute abstract method in interpreter"); |
|
1383 __ bind(L); |
|
1384 } |
|
1385 #endif |
|
1386 |
|
1387 // Since at this point in the method invocation the exception |
|
1388 // handler would try to exit the monitor of synchronized methods |
|
1389 // which hasn't been entered yet, we set the thread local variable |
|
1390 // _do_not_unlock_if_synchronized to true. The remove_activation |
|
1391 // will check this flag. |
|
1392 |
|
1393 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); |
|
1394 NOT_LP64(__ get_thread(thread)); |
|
1395 const Address do_not_unlock_if_synchronized(thread, |
|
1396 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); |
|
1397 __ movbool(do_not_unlock_if_synchronized, true); |
|
1398 |
|
1399 __ profile_parameters_type(rax, rcx, rdx); |
|
1400 // increment invocation count & check for overflow |
|
1401 Label invocation_counter_overflow; |
|
1402 Label profile_method; |
|
1403 Label profile_method_continue; |
|
1404 if (inc_counter) { |
|
1405 generate_counter_incr(&invocation_counter_overflow, |
|
1406 &profile_method, |
|
1407 &profile_method_continue); |
|
1408 if (ProfileInterpreter) { |
|
1409 __ bind(profile_method_continue); |
|
1410 } |
|
1411 } |
|
1412 |
|
1413 Label continue_after_compile; |
|
1414 __ bind(continue_after_compile); |
|
1415 |
|
1416 // check for synchronized interpreted methods |
|
1417 bang_stack_shadow_pages(false); |
|
1418 |
|
1419 // reset the _do_not_unlock_if_synchronized flag |
|
1420 NOT_LP64(__ get_thread(thread)); |
|
1421 __ movbool(do_not_unlock_if_synchronized, false); |
|
1422 |
|
1423 // check for synchronized methods |
|
1424 // Must happen AFTER invocation_counter check and stack overflow check, |
|
1425 // so method is not locked if overflows. |
|
1426 if (synchronized) { |
|
1427 // Allocate monitor and lock method |
|
1428 lock_method(); |
|
1429 } else { |
|
1430 // no synchronization necessary |
|
1431 #ifdef ASSERT |
|
1432 { |
|
1433 Label L; |
|
1434 __ movl(rax, access_flags); |
|
1435 __ testl(rax, JVM_ACC_SYNCHRONIZED); |
|
1436 __ jcc(Assembler::zero, L); |
|
1437 __ stop("method needs synchronization"); |
|
1438 __ bind(L); |
|
1439 } |
|
1440 #endif |
|
1441 } |

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);
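
  // dispatch_next(vtos) roughly does (illustrative): load the bytecode at
  // rbcp, advance rbcp, and jump through the active vtos dispatch table;
  // control does not return here on the normal path.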

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // r13/rsi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }
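
  // Caller is not deoptimized: pop this frame in place; the outgoing
  // arguments stay where they are and are fixed up below if necessary.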

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
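  // JVMTI stashed the requested early-return value in the JvmtiThreadState;
  // load_earlyret_value(state) below retrieves it into the tosca registers
  // for the given tos state (illustrative reading of its contract).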
  __ load_earlyret_value(state);  // 32 bits returns value in rdx, so don't reuse

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc(); __ push_ptr(); __ jmp(L);
#ifndef _LP64
  fep = __ pc(); __ push(ftos); __ jmp(L);
  dep = __ pc(); __ push(dtos); __ jmp(L);
#else
  fep = __ pc(); __ push_f(xmm0); __ jmp(L);
  dep = __ pc(); __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc(); __ push_l(); __ jmp(L);
  bep = cep = sep =
  iep = __ pc(); __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}
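
// Every non-void entry generated above pushes its tos value and funnels into
// the single vtos entry, so the template body can assume an empty tosca;
// roughly (illustrative):
//   aep: push_ptr -> L      fep/dep: push float/double -> L   lep: push_l -> L
//   bep/cep/sep/iep: push_i, fall through   vep: nothing to push
//   L: generate_and_dispatch(t)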


//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);        // pop return address so expression stack is 'pure'
  __ push(state);     // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);   // make sure return address is not destroyed by pop(state)
  __ pop(state);      // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);             // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}
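
// The code above maintains a (previous, current) bytecode-pair index;
// roughly (illustrative C sketch):
//   _index = (_index >> log2_number_of_codes) |
//            (t->bytecode() << log2_number_of_codes);
//   _counters[_index]++;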


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
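
// Debug aid: with the develop flag -XX:StopInterpreterAt=N, the code above
// executes int3 (a debugger breakpoint) once the global bytecode counter
// reaches exactly N.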
#endif // !PRODUCT
#endif // ! CC_INTERP