/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

#ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp"
#endif

#define __ _masm->

#ifndef CC_INTERP

//-----------------------------------------------------------------------------

extern "C" void entry(CodeBuffer*);

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ldr(rscratch1, Address(rfp,
                              frame::interpreter_frame_monitor_block_top_offset *
                              wordSize));
    __ mov(rscratch2, sp);
    __ cmp(rscratch1, rscratch2); // maximal rsp for current rfp (stack
                                  // grows negative)
    __ br(Assembler::HS, L);      // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}
|
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register r1
  __ movw(c_rarg2, r1);
  __ mov(c_rarg1, (address)name);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(r0, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::
                                    create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // Kind of lame: ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(c_rarg2, Address((address)message));
    } else {
      __ mov(c_rarg2, NULL_WORD);
    }
    __ call_VM(r0,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ b(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ dispatch_next(state);
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // Pop N words from the stack
  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);

  __ add(esp, esp, r1, Assembler::LSL, 3);

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);

#ifndef PRODUCT
  // tell the simulator that the method has been reentered
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }
#endif
  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}
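
// Note on the "Restore machine SP" sequence above (repeated in the deopt
// entry below), with illustrative numbers: if max_stack is 10 words and
// frame::interpreter_frame_monitor_size() is 2 (one BasicObjectLock), then
//   sp = (initial_sp - (10 + 2 + 2) * wordSize) & -16;
// i.e. we re-derive the lowest SP this frame may need from the method's
// max_stack, leave two words of apparent slack, and round down to the
// 16-byte alignment the AArch64 ABI requires.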
|
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // handle exceptions
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  __ get_dispatch();

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // Restore expression stack pointer
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ dispatch_next(state, step);
  return entry;
}


int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}
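
// T_OBJECT and T_ARRAY deliberately map to the same index: both results
// are oops and are post-processed by the single T_OBJECT result handler
// generated below.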
|
address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ uxtb(r0, r0);       break;
  case T_CHAR   : __ uxth(r0, r0);       break;
  case T_BYTE   : __ sxtb(r0, r0);       break;
  case T_SHORT  : __ sxth(r0, r0);       break;
  case T_INT    : __ uxtw(r0, r0);       break; // FIXME: We almost certainly don't need this
  case T_LONG   : /* nothing to do */    break;
  case T_VOID   : /* nothing to do */    break;
  case T_FLOAT  : /* nothing to do */    break;
  case T_DOUBLE : /* nothing to do */    break;
  case T_OBJECT :
    // retrieve result from frame
    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(r0);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(lr);                           // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ membar(Assembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
      __ cbz(r0, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                               in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, false, Assembler::EQ, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rmethod, rscratch2, done);
    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, false, Assembler::EQ, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter(rscratch2,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rmethod, rscratch2, done);

    if (ProfileInterpreter) { // %%% Merge this into MethodData*
      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
      __ addw(r1, r1, 1);
      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ ldrw(r1, invocation_counter);
    __ ldrw(r0, backedge_counter);

    __ addw(r1, r1, InvocationCounter::count_increment);
    __ andw(r0, r0, InvocationCounter::count_mask_value);

    __ strw(r1, invocation_counter);
    __ addw(r0, r0, r1);                // add both counters

    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      unsigned long offset;
      __ adrp(rscratch2, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit),
              offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
      __ cmp(r0, rscratch2);
      __ br(Assembler::LT, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(r0, *profile_method);
    }

    {
      unsigned long offset;
      __ adrp(rscratch2,
              ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit),
              offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::HS, *overflow);
    }
    __ bind(done);
  }
}
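
// A sketch of why the test above is 'sticky': the invocation counter is
// kept in the upper bits of its word (the low InvocationCounter::count_shift
// bits hold state), so each call adds count_increment rather than 1.  The
// limit comparisons (LT / HS above) only ever look at the accumulated
// value, and nothing on this path resets it, so once the threshold has
// been crossed every subsequent invocation takes the overflow path too.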
|
void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ mov(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ b(*do_continue);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this was not
// obvious in generate_method_entry), so the guard should work for them
// too.
//
// Args:
//      r3: number of additional locals this frame needs (what we must check)
//      rmethod: Method*
//
// Kills:
//      r0
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_amd64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  //
  // Note that we use SUBS rather than CMP here because the immediate
  // field of this instruction may overflow.  SUBS can cope with this
  // because it is a macro that will expand to some number of MOV
  // instructions and a register operation.
  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ br(Assembler::LS, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  const Address stack_base(rthread, Thread::stack_base_offset());
  const Address stack_size(rthread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(r0, overhead_size);
  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.

  __ ldr(rscratch1, stack_base);
  __ ldr(rscratch2, stack_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cbnz(rscratch1, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cbnz(rscratch2, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
  __ add(r0, r0, rscratch1);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);

  // add in the red and yellow zone sizes
  __ add(r0, r0, max_pages * page_size * 2);

  // check against the current stack bottom
  __ cmp(sp, r0);
  __ br(Assembler::HI, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller.  This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind.  The ANDR should be
  // unnecessary because the sender SP in r13 is always aligned, but
  // it doesn't hurt.
  __ andr(sp, r13, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}
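
// A worked reading of the check above: r0 ends up holding
//   (stack_base - stack_size) + overhead_size + locals_in_bytes
//       + max_pages * page_size * 2
// i.e. the stack limit plus everything this frame could consume plus the
// guard zones we might bang.  If the current sp is not strictly above
// that value (the HI branch), the frame cannot fit and we unwind to the
// shared StackOverflowError stub instead of building it.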
|
// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rmethod: Method*
//      rlocals: locals
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address monitor_block_top(
        rfp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::NE, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ br(Assembler::EQ, done);
    __ ldr(r0, Address(rmethod, Method::const_offset()));
    __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
    __ ldr(r0, Address(r0,
                       ConstantPool::pool_holder_offset_in_bytes()));
    __ ldr(r0, Address(r0, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ cbnz(r0, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ sub(sp, sp, entry_size); // add space for a monitor entry
  __ sub(esp, esp, entry_size);
  __ mov(rscratch1, esp);
  __ str(rscratch1, monitor_block_top);  // set new monitor block top
  // store object
  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
  __ mov(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}
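
// A sketch of the monitor entry allocated above (one BasicObjectLock of
// entry_size bytes, carved out below the current expression stack):
//
//    monitor_block_top --> [ BasicLock (displaced header slot)  ]
//                          [ obj: receiver or class mirror      ]
//
// Only the object slot is filled in here, via
// BasicObjectLock::obj_offset_in_bytes(); the lock word itself is written
// by the interpreter's lock_object helper on the fast and slow paths.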
|
// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      lr: return address
//      rmethod: Method*
//      rlocals: pointer to locals
//      rcpool: cp cache
//      stack_pointer: previous sp
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ sub(esp, sp, 12 * wordSize);
    __ mov(rbcp, zr);
    __ stp(esp, zr, Address(__ pre(sp, -12 * wordSize)));
    // add 2 zero-initialized slots for native calls
    __ stp(zr, zr, Address(sp, 10 * wordSize));
  } else {
    __ sub(esp, sp, 10 * wordSize);
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));    // get ConstMethod
    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ stp(esp, rbcp, Address(__ pre(sp, -10 * wordSize)));
  }

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
    __ cbz(rscratch1, method_data_continue);
    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
    __ stp(rscratch1, rmethod, Address(sp, 4 * wordSize)); // save Method* and mdp (method data pointer)
  } else {
    __ stp(zr, rmethod, Address(sp, 4 * wordSize));        // save Method* (no mdp)
  }

  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));

  __ stp(rfp, lr, Address(sp, 8 * wordSize));
  __ lea(rfp, Address(sp, 8 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ stp(zr, r13, Address(sp, 6 * wordSize));

  // Move SP out of the way
  if (! native_call) {
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
    __ andr(sp, rscratch1, -16);
  }
}
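
// Slot map of the fixed frame built above, in words above the new sp
// (read off the stp offsets; a sketch, not an authoritative layout):
//   sp + 0,1  : esp (expression stack bottom), rbcp (zr for natives)
//   sp + 2,3  : rlocals, rcpool
//   sp + 4,5  : mdp (or zr), rmethod
//   sp + 6,7  : last_sp (zr), sender sp (r13)
//   sp + 8,9  : saved rfp, return address (lr)  <-- rfp points at sp + 8
// The native variant allocates 12 words instead of 10 and zeroes the two
// extra slots at sp + 10,11.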
|
// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rmethod: Method*
  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    const Register local_0 = c_rarg0;
    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ldr(local_0, Address(esp, 0));
    __ cbz(local_0, slow_path);


    // Load the value of the referent field.
    const Address field_address(local_0, referent_offset);
    __ load_heap_oop(local_0, field_address);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ enter(); // g1_write may call runtime
    __ g1_write_barrier_pre(noreg /* obj */,
                            local_0 /* pre_val */,
                            rthread /* thread */,
                            rscratch2 /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);
    __ leave();
    // areturn
    __ andr(sp, r13, -16);  // done with stack
    __ ret(lr);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}
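
// Background for the pre-barrier above (informal): a SATB collector must
// see every reference that was live when concurrent marking started.  A
// plain Reference.get would let the referent escape without any barrier,
// so the loaded value is logged through g1_write_barrier_pre, exactly as
// the notes at the top of this method describe.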
|
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path
    // esp: args

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register val = c_rarg1;  // source java byte value
    const Register tbl = c_rarg2;  // scratch

    // Arguments are reversed on java expression stack
    __ ldrw(val, Address(esp, 0));              // byte value
    __ ldrw(crc, Address(esp, wordSize));       // Initial CRC

    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    __ add(tbl, tbl, offset);

    __ ornw(crc, zr, crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ ornw(crc, zr, crc); // ~crc

    // result in c_rarg0

    __ andr(sp, r13, -16);
    __ ret(lr);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}
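
// Note on the ornw pair above: ORN computes Rn | ~Rm, so
// ornw(crc, zr, crc) is simply crc = ~crc.  CRC-32 is specified with a
// pre- and post-inverted accumulator, while the table-driven byte update
// works on the raw register value, hence the complement on entry and
// again on exit.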
|
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register off = len;      // offset (never overlaps with 'len')

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ ldr(buf, Address(esp, 2*wordSize));  // long buf
      __ ldrw(off, Address(esp, wordSize));   // offset
      __ add(buf, buf, off);                  // + offset
      __ ldrw(crc, Address(esp, 4*wordSize)); // Initial CRC
    } else {
      __ ldr(buf, Address(esp, 2*wordSize));  // byte[] array
      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ ldrw(off, Address(esp, wordSize));   // offset
      __ add(buf, buf, off);                  // + offset
      __ ldrw(crc, Address(esp, 3*wordSize)); // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ ldrw(len, Address(esp, 0x0)); // Length

    __ andr(sp, r13, -16); // Restore the caller's SP

    // We are frameless so we can just jump to the stub.
    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}
|
void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int start_page = native_call ? StackShadowPages : 1;
    const int page_size = os::vm_page_size();
    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
      __ sub(rscratch2, sp, pages*page_size);
      __ ldr(zr, Address(rscratch2));
    }
  }
}
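
// Illustration with example numbers: for a 4K page and StackShadowPages
// of 20, a non-native entry emits twenty loads touching sp - 4K down to
// sp - 80K.  Each load either succeeds or traps on a guard page, so an
// overflow is taken here, at a known point, rather than somewhere inside
// the method.  Native entries bang only the deepest page, the earlier
// pages having been banged already.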
|
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // r1: Method*
  // rscratch1: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rmethod, Method::const_offset());
  const Address access_flags      (rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r2, ConstMethod::
                                       size_of_parameters_offset());

  // get parameter size (always needed)
  __ ldr(r2, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rmethod: Method*
  // r2: size of parameters
  // rscratch1: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ add(rlocals, rlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andr(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);
#ifndef PRODUCT
  // tell the simulator that a method has been entered
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }
#endif

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::NE, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = r17;
  const Register result_handler = r19;

  // allocate space for parameters
  __ ldr(t, Address(rmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
  __ andr(sp, rscratch1, -16);
  __ mov(esp, rscratch1);

  // get signature handler
  {
    Label L;
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ cbnz(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
         "adjust this code");

  // The generated handlers do not touch rmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ blr(t);
  __ get_method(rmethod);        // slow path can do a GC, reload rmethod


  // result handler is in r0
  // set result handler
  __ mov(result_handler, r0);
  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tst(t, JVM_ACC_STATIC);
    __ br(Assembler::EQ, L);
    // get mirror
    __ ldr(t, Address(rmethod, Method::const_offset()));
    __ ldr(t, Address(t, ConstMethod::constants_offset()));
    __ ldr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ ldr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in r10
  {
    Label L;
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ mov(rscratch2, unsatisfied);
    __ ldr(rscratch2, rscratch2);
    __ cmp(r10, rscratch2);
    __ br(Assembler::NE, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ get_method(rmethod);
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));

  // It is enough that the pc() points into the right code
  // segment. It does not have to be the correct return pc.
  __ set_last_Java_frame(esp, rfp, (address)NULL, rscratch1);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
    __ cmp(t, _thread_in_Java);
    __ br(Assembler::EQ, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // Call the native method.
  __ blrt(r10, rscratch1);
  __ maybe_isb();
  __ get_method(rmethod);
  // result potentially in r0 or v0

  // make room for the pushes we're about to do
  __ sub(rscratch1, esp, 4 * wordSize);
  __ andr(sp, rscratch1, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ mov(rscratch1, _thread_in_native_trans);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ dsb(Assembler::SY);
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(rthread, rscratch2);
    }
  }
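
  // Ordering note (informal): stlrw is a store-release, which orders the
  // result pushes above before the state change but does not by itself
  // keep the plain load of the safepoint state below from being hoisted
  // ahead of the store.  The dsb(SY) full barrier (or the serialization
  // page write) closes that store-load window, so the VM thread can rely
  // on seeing _thread_in_native_trans before this thread reads the
  // safepoint flag.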
|
  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    {
      unsigned long offset;
      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
    }
    assert(SafepointSynchronize::_not_synchronized == 0,
           "SafepointSynchronize::_not_synchronized");
    Label L;
    __ cbnz(rscratch2, L);
    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbz(rscratch2, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below. So we do a runtime call by
    // hand.
    //
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ blrt(rscratch2, 1, 0, 0);
    __ maybe_isb();
    __ get_method(rmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true, true);

  // reset handle block
  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, store_result;
    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmp(t, result_handler);
    __ br(Assembler::NE, no_oop);
    // retrieve result
    __ pop(ltos);
    __ cbz(r0, store_result);
    __ ldr(r0, Address(r0, 0));
    __ bind(store_result);
    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ ldrb(rscratch1, Address(rscratch1));
    __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled);
    __ br(Assembler::NE, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ blrt(rscratch2, 0, 0, 0);
    __ popa(); // XXX only restore smashed registers
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(rmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // rbcp == code_base()
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));    // get ConstMethod*
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset())); // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tst(t, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::EQ, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // monitor expected in c_rarg1 for slow unlock path
      __ lea (c_rarg1, Address(rfp,   // address of first monitor
                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                          wordSize - sizeof(BasicObjectLock))));

      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ cbnz(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in r0:d0, call result handler to
  // restore potential result in ST0 & handle result

  __ pop(ltos);
  __ pop(dtos);

  __ blr(result_handler);

  // remove activation
  __ ldr(esp, Address(rfp,
                      frame::interpreter_frame_sender_sp_offset *
                      wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mov(sp, esp);

  __ ret(lr);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}
|
//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // rscratch1: sender sp
  address entry_point = __ pc();

  const Address constMethod(rmethod, Method::const_offset());
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r3,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ldr(r3, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // r2: size of parameters

  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
  __ sub(r3, r3, r2); // r3 = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ sub(rlocals, rlocals, wordSize);

  // Make room for locals
  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r3 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ ands(zr, r3, r3);
    __ br(Assembler::LE, exit); // do nothing if r3 <= 0
    __ bind(loop);
    __ str(zr, Address(__ post(rscratch1, wordSize)));
    __ sub(r3, r3, 1); // until everything initialized
    __ cbnz(r3, loop);
    __ bind(exit);
  }
|
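  // The loop above clears the freshly allocated slots one word at a time,
  // starting from rscratch1 (still the lowest slot from the sub above);
  // zeroing keeps stale values, in particular stale oops, out of the new
  // local slots.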

  // And the base dispatch table
  __ get_dispatch();

  // initialize fixed part of activation frame
  generate_fixed_frame(false);
#ifndef PRODUCT
  // tell the simulator that a method has been entered
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }
#endif
  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);
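  // The bang above touches the stack shadow pages below sp so that a
  // potential stack overflow is triggered here, where it can still be
  // handled, rather than at some arbitrary point later in the method.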

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top (rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      // don't think we need this
      __ get_method(r1);
      __ b(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
  case Interpreter::java_lang_math_sin   : // fall thru
  case Interpreter::java_lang_math_cos   : // fall thru
  case Interpreter::java_lang_math_tan   : // fall thru
  case Interpreter::java_lang_math_abs   : // fall thru
  case Interpreter::java_lang_math_log   : // fall thru
  case Interpreter::java_lang_math_log10 : // fall thru
  case Interpreter::java_lang_math_sqrt  : // fall thru
  case Interpreter::java_lang_math_pow   : // fall thru
  case Interpreter::java_lang_math_exp   :
    return false;
  default:
    return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rfp thru expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}

// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in InterpreterGenerator::generate_method_entry.

  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition).  Since the callee parameters already account
  // for the callee's params we only need to account for the extra
  // locals.
  int size = overhead +
             (callee_locals - callee_params) * Interpreter::stackElementWords +
             monitors * frame::interpreter_frame_monitor_size() +
             temps * Interpreter::stackElementWords + extra_args;

  // On AArch64 we always keep the stack pointer 16-aligned, so we
  // must round up here.
  size = round_to(size, 2);
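  // round_to(size, 2) rounds the word count up to an even number, i.e.
  // to a multiple of 16 bytes, matching the sp alignment the generated
  // entry code maintains with andr(sp, ..., -16).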

  return size;
}

void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  // The frame interpreter_frame is guaranteed to be the right size,
  // as determined by a previous call to the size_activation() method.
  // It is also guaranteed to be walkable even though it is in a
  // skeletal state.

  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
    Interpreter::stackElementWords;

#ifdef ASSERT
  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
#endif

  interpreter_frame->interpreter_frame_set_method(method);
  // NOTE the difference in using sender_sp and
  // interpreter_frame_sender_sp.  interpreter_frame_sender_sp is
  // the original sp of the caller (the unextended_sp) and
  // sender_sp is fp+8/16 (32bit/64bit) XXX
  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
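  // Local slot 0 lives at the highest address of the locals area and the
  // slots grow towards lower addresses, hence sender_sp + max_locals - 1.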

#ifdef ASSERT
  if (caller->is_interpreted_frame()) {
    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
  }
#endif

  interpreter_frame->interpreter_frame_set_locals(locals);
  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
  BasicObjectLock* monbot = montop - moncount;
  interpreter_frame->interpreter_frame_set_monitor_end(monbot);

  // Set last_sp
  intptr_t* esp = (intptr_t*) monbot -
    tempcount*Interpreter::stackElementWords -
    popframe_extra_args;
  interpreter_frame->interpreter_frame_set_last_sp(esp);

  // All frames but the initial (oldest) interpreter frame we fill in have
  // a value for sender_sp that allows walking the stack but isn't
  // truly correct.  Correct the value here.
  if (extra_locals != 0 &&
      interpreter_frame->sender_sp() ==
      interpreter_frame->interpreter_frame_sender_sp()) {
    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
                                                       extra_locals);
  }
  *interpreter_frame->interpreter_frame_cache_addr() =
    method->constants()->cache();
}


//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // r0: exception
  // r3: return address/pc that threw exception
  __ restore_bcp();    // rbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase();  // restore rheapbase as heapbase.
  __ get_dispatch();

#ifndef PRODUCT
  // tell the simulator that the caller method has been reentered
  if (NotifySimulator) {
    __ get_method(rmethod);
    __ notify(Assembler::method_reentry);
  }
#endif
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, rmethod may be corrupt.
  __ get_method(rmethod);
  // expression stack is undefined here
  // r0: exception
  // rbcp: exception bcp
  __ verify_oop(r0);
  __ mov(c_rarg1, r0);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(r3,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);
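  // In effect (pseudocode for the five instructions above):
  //   sp = align_down(initial_sp - (max_stack + monitor_size + 4) * wordSize, 16);
  // the 4 extra words being, presumably, slop to protect the stack top.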

  // r0: exception handler entry point
  // r3: preserved exception oop
  // rbcp: bcp for exception handler
  __ push_ptr(r3); // push exception which is now the only value on the stack
  __ br(r0); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
  __ orr(r3, r3, JavaThread::popframe_processing_bit);
  __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ cbnz(r0, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(r0);
    __ ldr(r0, Address(r0, Method::const_offset()));
    __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
                                                    size_of_parameters_offset())));
    __ lsl(r0, r0, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
    __ sub(rlocals, rlocals, r0);
    __ add(rlocals, rlocals, wordSize);
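    // r0 now holds the parameter area size in bytes; local slot 0 sits at
    // the highest address, so stepping rlocals down by r0 and back up one
    // word leaves it pointing at the lowest parameter slot, the base
    // address that popframe_preserve_args copies from.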
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          rthread, r0, rlocals);

    __ remove_activation(vtos,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
    __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret(lr);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Restore the last_sp and null it out
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");

#if INCLUDE_JVMTI
  {
    Label L_done;

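    // A linker invokestatic (one of the MethodHandle linkTo* intrinsics)
    // consumes a trailing MemberName argument that plain re-execution of
    // the bytecode would not reproduce, so that argument may have to be
    // put back on the stack before we re-execute.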
    __ ldrb(rscratch1, Address(rbcp, 0));
    __ cmpw(rscratch1, Bytecodes::_invokestatic);
    __ br(Assembler::NE, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ ldr(c_rarg0, Address(rlocals, 0));
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);

    __ cbz(r0, L_done);

    __ str(r0, Address(esp, 0));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);
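  // Same stack-limit computation as in the exception path above:
  //   sp = align_down(initial_sp - (max_stack + monitor_size + 4) * wordSize, 16)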

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(r0);
  __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, false, true, false);
  // restore exception
  __ get_vm_result(r0, rthread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // r0: exception
  // lr: return address/pc that threw exception
  // sp: expression stack of caller
  // rfp: fp of caller
  // FIXME: There's no point saving LR here because VM calls don't trash it
  __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        rthread, lr);
  __ mov(r1, r0);                                // save exception handler
  __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
  // We might be returning to a deopt handler that expects r3 to
  // contain the exception pc
  __ mov(r3, lr);
  // Note that an "issuing PC" is actually the next PC after the call
  __ br(r1);                                     // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
  __ str(zr, cond_addr);

  __ remove_activation(state,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ ret(lr);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
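  // A non-vtos tos state means the top-of-stack value is cached in a
  // register. Each per-state entry below spills that cached value onto
  // the expression stack and then joins the common vtos point at L, so
  // a single vtos template body serves every entry state.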
  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(lr);
  __ push(state);
  __ push(RegSet::range(r0, r15), sp);
  __ mov(c_rarg2, r0);  // Pass itos
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(RegSet::range(r0, r15), sp);
  __ pop(state);
  __ pop(lr);
  __ ret(lr);                                   // return from result handler

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ push(rscratch1);
  __ push(rscratch2);
  Label L;
  __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
  __ bind(L);
  __ ldxr(rscratch1, rscratch2);
  __ add(rscratch1, rscratch1, 1);
  __ stxr(rscratch1, rscratch1, rscratch2);
  __ cbnzw(rscratch1, L);
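  // ldxr/stxr form a load-exclusive/store-exclusive pair implementing an
  // atomic increment: stxr writes its status (0 on success) into its
  // first operand, here reusing rscratch1, and cbnzw retries the whole
  // sequence if the exclusive monitor was lost.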
  __ pop(rscratch2);
  __ pop(rscratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ bl(Interpreter::trace_code(t->tos_in()));
  __ reinit_heapbase();
}

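// Debugging aid: once the dynamic bytecode counter reaches the value of
// the StopInterpreterAt flag, execute a brk instruction so an attached
// debugger gains control at that exact bytecode.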
void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ push(rscratch1);
  __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
  __ ldr(rscratch1, Address(rscratch1));
  __ mov(rscratch2, StopInterpreterAt);
  __ cmpw(rscratch1, rscratch2);
  __ br(Assembler::NE, L);
  __ brk(0);
  __ bind(L);
  __ pop(rscratch1);
}

#ifdef BUILTIN_SIM

#include <sys/mman.h>
#include <unistd.h>

extern "C" {

  static int PAGESIZE = getpagesize();
  int is_mapped_address(u_int64_t address)
  {
    address = (address & ~((u_int64_t)PAGESIZE - 1));
    if (msync((void *)address, PAGESIZE, MS_ASYNC) == 0) {
      return true;
    }
    if (errno != ENOMEM) {
      return true;
    }
    return false;
  }

  void bccheck1(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
  {
    if (method != 0) {
      method[0] = '\0';
    }
    if (bcidx != 0) {
      *bcidx = -2;
    }
    if (decode != 0) {
      decode[0] = 0;
    }

    if (framesize != 0) {
      *framesize = -1;
    }

    if (Interpreter::contains((address)pc)) {
      AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
      Method* meth;
      address bcp;
      if (fp) {
#define FRAME_SLOT_METHOD 3
#define FRAME_SLOT_BCP 7
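        // These hard-coded slot numbers appear to mirror
        // frame::interpreter_frame_method_offset (-3) and
        // frame::interpreter_frame_bcp_offset (-7): the saved Method*
        // and bcp live 3 and 7 words below the interpreter frame's fp.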
        meth = (Method*)sim->getMemory()->loadU64(fp - (FRAME_SLOT_METHOD << 3));
        bcp = (address)sim->getMemory()->loadU64(fp - (FRAME_SLOT_BCP << 3));
#undef FRAME_SLOT_METHOD
#undef FRAME_SLOT_BCP
      } else {
        meth = (Method*)sim->getCPUState().xreg(RMETHOD, 0);
        bcp = (address)sim->getCPUState().xreg(RBCP, 0);
      }
      if (meth->is_native()) {
        return;
      }
      if (method && meth->is_method()) {
        ResourceMark rm;
        method[0] = 'I';
        method[1] = ' ';
        meth->name_and_sig_as_C_string(method + 2, 398);
      }
      if (bcidx) {
        if (meth->contains(bcp)) {
          *bcidx = meth->bci_from(bcp);
        } else {
          *bcidx = -2;
        }
      }
      if (decode) {
        if (!BytecodeTracer::closure()) {
          BytecodeTracer::set_closure(BytecodeTracer::std_closure());
        }
        stringStream str(decode, 400);
        BytecodeTracer::trace(meth, bcp, &str);
      }
    } else {
      if (method) {
        CodeBlob *cb = CodeCache::find_blob((address)pc);
        if (cb != NULL) {
          if (cb->is_nmethod()) {
            ResourceMark rm;
            nmethod* nm = (nmethod*)cb;
            method[0] = 'C';
            method[1] = ' ';
            nm->method()->name_and_sig_as_C_string(method + 2, 398);
          } else if (cb->is_adapter_blob()) {
            strcpy(method, "B adapter blob");
          } else if (cb->is_runtime_stub()) {
            strcpy(method, "B runtime stub");
          } else if (cb->is_exception_stub()) {
            strcpy(method, "B exception stub");
          } else if (cb->is_deoptimization_stub()) {
            strcpy(method, "B deoptimization stub");
          } else if (cb->is_safepoint_stub()) {
            strcpy(method, "B safepoint stub");
          } else if (cb->is_uncommon_trap_stub()) {
            strcpy(method, "B uncommon trap stub");
          } else if (cb->contains((address)StubRoutines::call_stub())) {
            strcpy(method, "B call stub");
          } else {
            strcpy(method, "B unknown blob : ");
            strcat(method, cb->name());
          }
          if (framesize != NULL) {
            *framesize = cb->frame_size();
          }
        }
      }
    }
  }


  JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
  {
    bccheck1(pc, fp, method, bcidx, framesize, decode);
  }
}

#endif // BUILTIN_SIM
#endif // !PRODUCT
#endif // ! CC_INTERP