/*
 * Copyright 2007-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_cppInterpreter_sparc.cpp.incl"

#ifdef CC_INTERP

// Routine exists to make tracebacks look decent in debugger
// while "shadow" interpreter frames are on stack. It is also
// used to distinguish interpreter frames.

extern "C" void RecursiveInterpreterActivation(interpreterState istate) {
  ShouldNotReachHere();
}

bool CppInterpreter::contains(address pc) {
  return ( _code->contains(pc) ||
         ( pc == (CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset)));
}

#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
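// Editorial note: STATE(field) expands to the (base register, byte offset) pair for a
// BytecodeInterpreter field relative to Lstate, so it can be handed directly to the
// two-operand load/store forms used below, e.g. __ ld_ptr(STATE(_method), G5_method).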
#define __ _masm->

Label frame_manager_entry;
Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to the unsynchronized
                                      // c++ interpreter entry point; this holds that entry point label.

static address unctrap_frame_manager_entry = NULL;

static address interpreter_return_address = NULL;
static address deopt_frame_manager_return_atos = NULL;
static address deopt_frame_manager_return_btos = NULL;
static address deopt_frame_manager_return_itos = NULL;
static address deopt_frame_manager_return_ltos = NULL;
static address deopt_frame_manager_return_ftos = NULL;
static address deopt_frame_manager_return_dtos = NULL;
static address deopt_frame_manager_return_vtos = NULL;

const Register prevState = G1_scratch;

void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  __ stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
#ifdef _LP64
  __ stx(O0, STATE(_native_lresult));
#else
  __ std(O0, STATE(_native_lresult));
#endif
}

void InterpreterGenerator::restore_native_result(void) {

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
#ifdef _LP64
  __ ldx(STATE(_native_lresult), O0);
#else
  __ ldd(STATE(_native_lresult), O0);
#endif
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
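    // Editorial note: the subcc/addc pair above is the usual SPARC booleanization idiom:
    // subcc(G0, O0, G0) computes 0 - O0 and sets the carry (borrow) flag exactly when
    // O0 != 0, and addc(G0, 0, dst) then materializes that carry bit as 0 or 1.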
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i); break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i); break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i); break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2); // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i); break;
    case T_VOID   : /* nothing to do */ break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" ); break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" ); break;
    case T_OBJECT :
      __ ld_ptr(STATE(_oop_temp), Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                      // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);     // remove interpreter frame
  NOT_PRODUCT(__ emit_long(0);)                  // marker for disassembly
  return entry;
}

// tosca based result to c++ interpreter stack based result.
// Result goes to address in L1_scratch

address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
  // A result is in the native abi result register from a native method call.
  // We need to return this result to the interpreter by pushing the result on the interpreter's
  // stack. This is relatively simple: the destination is in L1_scratch,
  // i.e. L1_scratch is the first free element on the stack. If we "push" a return value we must
  // adjust L1_scratch.
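  // Editorial note: the expression stack grows towards lower addresses, so each "push"
  // below is a store at (L1_scratch + 0) followed by sub(L1_scratch, wordSize, L1_scratch)
  // (or 2*wordSize for the two-word long/double results).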
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN:
      // !0 => true; 0 => false
      __ subcc(G0, O0, G0);
      __ addc(G0, 0, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    // cannot use and3, 0xFFFF too big as immediate value!
    case T_CHAR   :
      __ sll(O0, 16, O0);
      __ srl(O0, 16, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_BYTE   :
      __ sll(O0, 24, O0);
      __ sra(O0, 24, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_SHORT  :
      __ sll(O0, 16, O0);
      __ sra(O0, 16, O0);
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;
    case T_LONG   :
#ifndef _LP64
#if defined(COMPILER2)
      // All return values are where we want them, except for Longs. C2 returns
      // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
      // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
      // build even if we are returning from interpreted we just do a little
      // stupid shuffling.
      // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
      // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
      // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
      __ stx(G1, L1_scratch, -wordSize);
#else
      // native result is in O0, O1
      __ st(O1, L1_scratch, 0);                  // Low order
      __ st(O0, L1_scratch, -wordSize);          // High order
#endif /* COMPILER2 */
#else
      __ stx(O0, L1_scratch, -wordSize);
#endif
      __ sub(L1_scratch, 2*wordSize, L1_scratch);
      break;

    case T_INT    :
      __ st(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_VOID   : /* nothing to do */
      break;

    case T_FLOAT  :
      __ stf(FloatRegisterImpl::S, F0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;

    case T_DOUBLE :
      // Every stack slot is aligned on 64 bit. However, is this
      // the correct stack slot on 64bit?? QQQ
      __ stf(FloatRegisterImpl::D, F0, L1_scratch, -wordSize);
      __ sub(L1_scratch, 2*wordSize, L1_scratch);
      break;
    case T_OBJECT :
      __ verify_oop(O0);
      __ st_ptr(O0, L1_scratch, 0);
      __ sub(L1_scratch, wordSize, L1_scratch);
      break;
    default       : ShouldNotReachHere();
  }
  __ retl();                          // return from interpreter activation
  __ delayed()->nop();                // schedule this better
  NOT_PRODUCT(__ emit_long(0);)       // marker for disassembly
  return entry;
}

address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
  // A result is in the java expression stack of the interpreted method that has just
  // returned. Place this result on the java expression stack of the caller.
  //
  // The current interpreter activation in Lstate is for the method just returning its
  // result. So we know that the result of this method is on the top of the current
  // execution stack (which is pre-pushed) and will be returned to the top of the caller
  // stack. The top of the callers stack is the bottom of the locals of the current
  // activation.
  // Because of the way activations are managed by the frame manager the value of esp is
  // below both the stack top of the current activation and naturally the stack top
  // of the calling activation. This enables this routine to leave the return address
  // to the frame manager on the stack and do a vanilla return.
  //
  // On entry: O0 - points to source (callee stack top)
  //           O1 - points to destination (caller stack top [i.e. free location])
  // destroys O2, O3
  //

  address entry = __ pc();
  switch (type) {
    case T_VOID:  break;
      break;
    case T_FLOAT  :
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      // 1 word result
      __ ld(O0, 0, O2);
      __ st(O2, O1, 0);
      __ sub(O1, wordSize, O1);
      break;
    case T_DOUBLE :
    case T_LONG   :
      // return top two words on current expression stack to caller's expression stack
      // The caller's expression stack is adjacent to the current frame manager's interpreterState
      // except we allocated one extra word for this interpreterState so we won't overwrite it
      // when we return a two word result.
#ifdef _LP64
      __ ld_ptr(O0, 0, O2);
      __ st_ptr(O2, O1, -wordSize);
#else
      __ ld(O0, 0, O2);
      __ ld(O0, wordSize, O3);
      __ st(O3, O1, 0);
      __ st(O2, O1, -wordSize);
#endif
      __ sub(O1, 2*wordSize, O1);
      break;
    case T_OBJECT :
      __ ld_ptr(O0, 0, O2);
      __ verify_oop(O2);                         // verify it
      __ st_ptr(O2, O1, 0);
      __ sub(O1, wordSize, O1);
      break;
    default       : ShouldNotReachHere();
  }
  __ retl();
  __ delayed()->nop(); // QQ schedule this better
  return entry;
}

address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
  // A result is in the java expression stack of the interpreted method that has just
  // returned. Place this result in the native abi that the caller expects.
  // We are in a new frame registers we set must be in caller (i.e. callstub) frame.
  //
  // Similar to generate_stack_to_stack_converter above. Called at a similar time from the
  // frame manager except in this situation the caller is native code (c1/c2/call_stub)
  // and so rather than return result onto caller's java expression stack we return the
  // result in the expected location based on the native abi.
  // On entry: O0 - source (stack top)
  // On exit result in expected output register
  // QQQ schedule this better

  address entry = __ pc();
  switch (type) {
    case T_VOID:  break;
      break;
    case T_FLOAT  :
      __ ldf(FloatRegisterImpl::S, O0, 0, F0);
      break;
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      // 1 word result
      __ ld(O0, 0, O0->after_save());
      break;
    case T_DOUBLE :
      __ ldf(FloatRegisterImpl::D, O0, 0, F0);
      break;
    case T_LONG   :
      // return top two words on current expression stack to caller's expression stack
      // The caller's expression stack is adjacent to the current frame manager's interpreterState
      // except we allocated one extra word for this interpreterState so we won't overwrite it
      // when we return a two word result.
#ifdef _LP64
      __ ld_ptr(O0, 0, O0->after_save());
#else
      __ ld(O0, wordSize, O1->after_save());
      __ ld(O0, 0, O0->after_save());
#endif
#if defined(COMPILER2) && !defined(_LP64)
      // C2 expects long results in G1 we can't tell if we're returning to interpreted
      // or compiled so just be safe use G1 and O0/O1

      // Shift bits into high (msb) of G1
      __ sllx(Otos_l1->after_save(), 32, G1);
      // Zero extend low bits
      __ srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
      __ or3 (Otos_l2->after_save(), G1, G1);
#endif /* COMPILER2 */
      break;
    case T_OBJECT :
      __ ld_ptr(O0, 0, O0->after_save());
      __ verify_oop(O0->after_save());           // verify it
      break;
    default       : ShouldNotReachHere();
  }
  __ retl();
  __ delayed()->nop();
  return entry;
}

address CppInterpreter::return_entry(TosState state, int length) {
  // make it look good in the debugger
  return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset;
}

address CppInterpreter::deopt_entry(TosState state, int length) {
  address ret = NULL;
  if (length != 0) {
    switch (state) {
      case atos: ret = deopt_frame_manager_return_atos; break;
      case btos: ret = deopt_frame_manager_return_btos; break;
      case ctos:
      case stos:
      case itos: ret = deopt_frame_manager_return_itos; break;
      case ltos: ret = deopt_frame_manager_return_ltos; break;
      case ftos: ret = deopt_frame_manager_return_ftos; break;
      case dtos: ret = deopt_frame_manager_return_dtos; break;
      case vtos: ret = deopt_frame_manager_return_vtos; break;
    }
  } else {
    ret = unctrap_frame_manager_entry;  // re-execute the bytecode ( e.g. uncommon trap)
  }
  assert(ret != NULL, "Not initialized");
  return ret;
}

//
// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Update standard invocation counters
  __ increment_invocation_counter(O0, G3_scratch);
  if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
    __ ld_ptr(STATE(_method), G3_scratch);
    Address interpreter_invocation_counter(G3_scratch, 0, in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
    __ ld(interpreter_invocation_counter, G3_scratch);
    __ inc(G3_scratch);
    __ st(G3_scratch, interpreter_invocation_counter);
  }

  Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit);
  __ sethi(invocation_limit);
  __ ld(invocation_limit, G3_scratch);
  __ cmp(O0, G3_scratch);
  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
  __ delayed()->nop();

}

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  __ verify_oop(G5_method);

  // do nothing for empty methods (do not even increment invocation counter)
  if ( UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, frame_manager_entry);
    __ delayed()->nop();

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);
    return entry;
  }
  return NULL;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  //       parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;

  if ( UseFastAccessorMethods) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::const_offset())), G1_scratch);
    __ ld(Address(G1_scratch, 0, in_bytes(constMethodOopDesc::codes_offset())), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                       ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
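    // Editorial note: the shift pair above isolates the 2-byte constant pool cache index
    // from the instruction word and leaves it pre-scaled by the byte size of a
    // ConstantPoolCacheEntry, ready to be added to the cache base below.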

    // get constant pool cache
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::constants_offset()), G3_scratch);
    __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant Pool cache entry to see if it has been resolved.
    // If not, need the slow path.
    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp(G1_scratch, Bytecodes::_getfield);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
    __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
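    // Editorial note: each cmp/br pair in this chain uses an annulled, likely-taken
    // branch whose delay slot holds the typed load, so the load from
    // (receiver + field offset) only executes when the tos type actually matches.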
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    __ ba(false, fast_accessor_slow_entry_path);
    __ delayed()->nop();
    return entry;
  }
  return NULL;
}

//
// Interpreter stub for calling a native method. (C++ interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;
  const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));

  bool inc_counter = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address access_flags (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset()));

  Label Lentry;
  __ bind(Lentry);

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(access_flags, Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  __ lduh(size_of_parameters, Gtmp1);
  __ sll(Gtmp1, LogBytesPerWord, Gtmp2);     // parameter size in bytes
  __ add(Gargs, Gtmp2, Gargs);               // points to first local + BytesPerWord
  // NEW
  __ add(Gargs, -wordSize, Gargs);           // points to first local[0]
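  // Editorial note: locals are addressed at decreasing offsets from local[0], so after
  // adding the parameter size in bytes and backing up one word, Gargs points at local[0]
  // (the first parameter), the highest-addressed local slot.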
  // generate the code to allocate the interpreter stack frame
  // NEW FRAME ALLOCATED HERE
  // save callers original sp
  // __ mov(SP, I5_savedSP->after_restore());

  generate_compute_interpreter_state(Lstate, G0, true);

  // At this point Lstate points to new interpreter state
  //

  const Address do_not_unlock_if_synchronized(G2_thread, 0,
      in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling i.e. unlock_if_synchronized_method will
  // check this thread local flag.
  // This flag has two effects, one is to force an unwind in the topmost
  // interpreter frame and not perform an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);


  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  Label Lcontinue;
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);
  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check, so method is not locked
  // if counter overflows.

  if (synchronized) {
    lock_method();
    // Don't see how G2_thread is preserved here...
    // __ verify_thread(); QQQ destroys L0,L1 can't use
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld_ptr(STATE(_method), G5_method);
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  // __ verify_thread(); kills L1,L2 can't use at the moment

  // jvmti/jvmpi support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler

  Label pending_exception_present;

  { Label L;
    __ ld_ptr(STATE(_method), G5_method);
    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch);
    __ tst(G3_scratch);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), G5_method, false);
    __ ld_ptr(STATE(_method), G5_method);

    Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
    __ ld_ptr(exception_addr, G3_scratch);
    __ br_notnull(G3_scratch, false, Assembler::pn, pending_exception_present);
    __ delayed()->nop();
    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::signature_handler_offset())), G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lstate in particular)

  // Flush the state pointer to the register save area
  // Which is the only register we need for a stack walk.
  __ st_ptr(Lstate, SP, (Lstate->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);

  __ mov(Lstate, O1);         // Need to pass the state pointer across the frame

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame
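  // Editorial note: save() adds the (negative) O3 = SP - FP to SP while rotating the
  // register window, so the new frame is exactly as large as the interpreter frame it
  // protects; Lstate was flushed above so a stack walker can still find the interpreter
  // state through the saved window.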

  __ mov(I1, Lstate);          // In the "natural" register.

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).


  // call signature handler
  __ ld_ptr(STATE(_method), Lmethod);
  __ ld_ptr(STATE(_locals), Llocals);

  __ callr(G3_scratch, 0);
  __ delayed()->nop();
  __ ld_ptr(STATE(_thread), G2_thread);  // restore thread (shouldn't be needed)

  { Label not_static;

    __ ld_ptr(STATE(_method), G5_method);
    __ ld(access_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    __ delayed()->
      // get native function entry point(O0 is a good temp until the very end)
      ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc::native_function_offset())), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

    __ ld_ptr(Address(G5_method, 0, in_bytes(methodOopDesc:: constants_offset())), O1);
    __ ld_ptr(Address(O1, 0, constantPoolOopDesc::pool_holder_offset_in_bytes()), O1);
    __ ld_ptr(O1, mirror_offset, O1);
    // where the mirror handle body is allocated:
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ tst(O1);
      __ brx(Assembler::notZero, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, STATE(_oop_temp));
    __ add(STATE(_oop_temp), O1);            // this is really an LEA not an add
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ tst(O0);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the java frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //


  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread,
                0,
                in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset()));
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp(G3_scratch, _thread_in_Java);
    __ br(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ callr(O0, 0);
  __ delayed()->
     add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
  __ ld_ptr(STATE(_thread), G2_thread);  // restore thread

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if(os::is_MP()) {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);


    Label L;
    Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->
      ld(suspend_state, G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(noreg,
                    CAST_FROM_FN_PTR(address, JavaThread::check_safepoint_and_suspend_for_native_trans),
                    G2_thread);
    __ ld_ptr(STATE(_thread), G2_thread);  // restore thread
    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef __LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* __LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);


  // thread state is thread_in_native_trans. Any safepoint blocking has
  // happened in the trampoline; we are ready to switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // If we have an oop result, store it where it will be safe for any further gc
  // until we return, now that we've released the handle it might be protected by.

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp(G3_scratch, Lscratch);
    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
    __ delayed()->nop();
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);
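    // Editorial note: the branch above is annulled, so the delayed ld_ptr (unboxing the
    // handle) only executes when O0 is non-NULL, while the mov(G0, O0) on the
    // fall-through path normalizes a NULL handle to a NULL oop.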

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, STATE(_oop_temp));

    __ bind(no_oop);

  }

  // reset handle block
  __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));

    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ bind(pending_exception_present);
    // With c++ interpreter we just leave it pending caller will do the correct thing. However...
    // Like x86 we ignore the result of the native call and leave the method locked. This
    // seems wrong to leave things locked.

    __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
    __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame

    __ bind(L);
  }

  // jvmdi/jvmpi support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    // Get the initial monitor we allocated
    __ sub(Lstate, entry_size, O1);               // initial monitor
    __ unlock_object(O1);
    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1 we can't tell if we're returning to interpreted
  // or compiled so just be safe.

  __ sllx(O0, 32, G1);          // Shift bits into high G1
  __ srl (O1, 0, O1);           // Zero extend O1
  __ or3 (O1, G1, G1);          // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

#ifdef ASSERT
  {
    Label ok;
    __ cmp(I5_savedSP, FP);
    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  // Calls result handler which POPS FRAME
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();

  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}

void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
                                                                 const Register prev_state,
                                                                 bool native) {

  // On entry
  // G5_method - caller's method
  // Gargs - points to initial parameters (i.e. locals[0])
  // G2_thread - valid? (C1 only??)
  // "prev_state" - contains any previous frame manager state which we must save a link
  //
  // On return
  // "state" is a pointer to the newly allocated state object. We must allocate and initialize
  // a new interpreterState object and the method expression stack.

  assert_different_registers(state, prev_state);
  assert_different_registers(prev_state, G3_scratch);
  const Register Gtmp = G3_scratch;
  const Address constants         (G5_method, 0, in_bytes(methodOopDesc::constants_offset()));
  const Address access_flags      (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset()));
  const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
  const Address max_stack         (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset()));
  const Address size_of_locals    (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset()));

  // slop factor is two extra slots on the expression stack so that
  // we always have room to store a result when returning from a call without parameters
  // that returns a result.

  const int slop_factor = 2*wordSize;

  const int fixed_size = ((sizeof(BytecodeInterpreter) + slop_factor) >> LogBytesPerWord) + // what is the slop factor?
                         frame::memory_parameter_word_sp_offset +  // register save area + param window
                         (native ?  frame::interpreter_frame_extra_outgoing_argument_words : 0); // JNI, class

  // XXX G5_method valid

  // Now compute new frame size

  if (native) {
    __ lduh( size_of_parameters, Gtmp );
    __ calc_mem_param_words(Gtmp, Gtmp);     // space for native call parameters passed on the stack in words
  } else {
    __ lduh(max_stack, Gtmp);                // Full size expression stack
  }
  __ add(Gtmp, fixed_size, Gtmp);            // plus the fixed portion

  __ neg(Gtmp);                              // negative space for stack/parameters in words
  __ and3(Gtmp, -WordsPerLong, Gtmp);        // make multiple of 2 (SP must be 2-word aligned)
  __ sll(Gtmp, LogBytesPerWord, Gtmp);       // negative space for frame in bytes

  // Need to do stack size check here before we fault on large frames

  Label stack_ok;

  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);


  __ ld_ptr(G2_thread, in_bytes(Thread::stack_base_offset()), O0);
  __ ld_ptr(G2_thread, in_bytes(Thread::stack_size_offset()), O1);
  // compute stack bottom
  __ sub(O0, O1, O0);

  // Avoid touching the guard pages
  // Also a fudge for frame size of BytecodeInterpreter::run
  // It varies from 1k->4k depending on build type
  const int fudge = 6 * K;

  __ set(fudge + (max_pages * os::vm_page_size()), O1);

  __ add(O0, O1, O0);
  __ sub(O0, Gtmp, O0);
  __ cmp(SP, O0);
  __ brx(Assembler::greaterUnsigned, false, Assembler::pt, stack_ok);
  __ delayed()->nop();

  // throw exception return address becomes throwing pc

  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ stop("never reached");

  __ bind(stack_ok);

  __ save(SP, Gtmp, SP);                     // setup new frame and register window

  // New window I7 call_stub or previous activation
  // O6 - register save area, BytecodeInterpreter just below it, args/locals just above that
  //
  __ sub(FP, sizeof(BytecodeInterpreter), state);         // Point to new Interpreter state
  __ add(state, STACK_BIAS, state );                      // Account for 64bit bias

#define XXX_STATE(field_name) state, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))

  // Initialize a new Interpreter state
  // orig_sp - caller's original sp
  // G2_thread - thread
  // Gargs - &locals[0] (unbiased?)
  // G5_method - method
  // SP (biased) - accounts for full size java stack, BytecodeInterpreter object, register save area, and register parameter save window


  __ set(0xdead0004, O1);


  __ st_ptr(Gargs, XXX_STATE(_locals));
  __ st_ptr(G0, XXX_STATE(_oop_temp));

  __ st_ptr(state, XXX_STATE(_self_link));                // point to self
  __ st_ptr(prev_state->after_save(), XXX_STATE(_prev_link)); // Chain interpreter states
  __ st_ptr(G2_thread, XXX_STATE(_thread));               // Store javathread

  if (native) {
    __ st_ptr(G0, XXX_STATE(_bcp));
  } else {
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::const_offset()), O2); // get constMethodOop
    __ add(O2, in_bytes(constMethodOopDesc::codes_offset()), O2);      // get bcp
    __ st_ptr(O2, XXX_STATE(_bcp));
  }

  __ st_ptr(G0, XXX_STATE(_mdx));
  __ st_ptr(G5_method, XXX_STATE(_method));

  __ set((int) BytecodeInterpreter::method_entry, O1);
  __ st(O1, XXX_STATE(_msg));

  __ ld_ptr(constants, O3);
  __ ld_ptr(O3, constantPoolOopDesc::cache_offset_in_bytes(), O2);
  __ st_ptr(O2, XXX_STATE(_constants));

  __ st_ptr(G0, XXX_STATE(_result._to_call._callee));

  // Monitor base is just start of BytecodeInterpreter object;
  __ mov(state, O2);
  __ st_ptr(O2, XXX_STATE(_monitor_base));

  // Do we need a monitor for synchronized method?
  {
    __ ld(access_flags, O1);
    Label done;
    Label got_obj;
    __ btst(JVM_ACC_SYNCHRONIZED, O1);
    __ br( Assembler::zero, false, Assembler::pt, done);

    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ delayed()->btst(JVM_ACC_STATIC, O1);
    __ ld_ptr(XXX_STATE(_locals), O1);
    __ br( Assembler::zero, true, Assembler::pt, got_obj);
    __ delayed()->ld_ptr(O1, 0, O1);                  // get receiver for not-static case
    __ ld_ptr(constants, O1);
    __ ld_ptr( O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
    // lock the mirror, not the klassOop
    __ ld_ptr( O1, mirror_offset, O1);

    __ bind(got_obj);

#ifdef ASSERT
    __ tst(O1);
    __ breakpoint_trap(Assembler::zero);
#endif // ASSERT

    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    __ sub(SP, entry_size, SP);                         // account for initial monitor
    __ sub(O2, entry_size, O2);                         // initial monitor
    __ st_ptr(O1, O2, BasicObjectLock::obj_offset_in_bytes()); // and allocate it for interpreter use
    __ bind(done);
  }

  // Remember initial frame bottom

  __ st_ptr(SP, XXX_STATE(_frame_bottom));

  __ st_ptr(O2, XXX_STATE(_stack_base));

  __ sub(O2, wordSize, O2);                    // prepush
  __ st_ptr(O2, XXX_STATE(_stack));            // PREPUSH

  __ lduh(max_stack, O3);                      // Full size expression stack
  __ sll(O3, LogBytesPerWord, O3);
  __ sub(O2, O3, O3);
  // __ sub(O3, wordSize, O3);                 // so prepush doesn't look out of bounds
  __ st_ptr(O3, XXX_STATE(_stack_limit));

  if (!native) {
    //
    // Code to initialize locals
    //
    Register init_value = noreg;    // will be G0 if we must clear locals
    // Now zero locals
    if (true /* zerolocals */ || ClearInterpreterLocals) {
      // explicitly initialize locals
      init_value = G0;
    } else {
#ifdef ASSERT
      // initialize locals to a garbage pattern for better debugging
      init_value = O3;
      __ set( 0x0F0F0F0F, init_value );
#endif // ASSERT
    }
    if (init_value != noreg) {
      Label clear_loop;

      // NOTE: If you change the frame layout, this code will need to
      // be updated!
      __ lduh( size_of_locals, O2 );
      __ lduh( size_of_parameters, O1 );
      __ sll( O2, LogBytesPerWord, O2);
      __ sll( O1, LogBytesPerWord, O1 );
      __ ld_ptr(XXX_STATE(_locals), L2_scratch);
      __ sub( L2_scratch, O2, O2 );
      __ sub( L2_scratch, O1, O1 );

      __ bind( clear_loop );
      __ inc( O2, wordSize );

      __ cmp( O2, O1 );
      __ br( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
      __ delayed()->st_ptr( init_value, O2, 0 );
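      // Editorial note: the loop walks from just above (locals - size_of_locals words)
      // up to (locals - size_of_parameters words), i.e. over the non-parameter local
      // slots only, storing init_value into each one from the branch delay slot.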
    }
  }
}
// Find preallocated monitor and lock method (C++ interpreter)
//
void InterpreterGenerator::lock_method(void) {
  // Lock the current method.
  // Destroys registers L2_scratch, L3_scratch, O0
  //
  // Find everything relative to Lstate

#ifdef ASSERT
  __ ld_ptr(STATE(_method), L2_scratch);
  __ ld(L2_scratch, in_bytes(methodOopDesc::access_flags_offset()), O0);

  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // monitor is already allocated at stack base
  // and the lockee is already present
  __ ld_ptr(STATE(_stack_base), L2_scratch);
  __ ld_ptr(L2_scratch, BasicObjectLock::obj_offset_in_bytes(), O0);   // get object
  __ lock_object(L2_scratch, O0);

}

// Generate code for handling resuming a deopted method
void CppInterpreterGenerator::generate_deopt_handling() {

  Label return_from_deopt_common;

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_atos = __ pc();

  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_OBJECT), L3_scratch);    // Result stub address array index


  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_btos = __ pc();

  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_BOOLEAN), L3_scratch);   // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_itos = __ pc();

  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_INT), L3_scratch);       // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)

  deopt_frame_manager_return_ltos = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for Longs. C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
  // build even if we are returning from interpreted we just do a little
  // stupid shuffling.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.

  __ srl (G1, 0,O1);
  __ srlx(G1,32,O0);
#endif /* !_LP64 && COMPILER2 */
  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_LONG), L3_scratch);      // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)

  deopt_frame_manager_return_ftos = __ pc();
  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_FLOAT), L3_scratch);     // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_dtos = __ pc();

  // O0/O1 live
  __ ba(false, return_from_deopt_common);
  __ delayed()->set(AbstractInterpreter::BasicType_as_index(T_DOUBLE), L3_scratch);    // Result stub address array index

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_vtos = __ pc();

  // O0/O1 live
  __ set(AbstractInterpreter::BasicType_as_index(T_VOID), L3_scratch);

  // Deopt return common
  // an index is present that lets us move any possible result being
  // returned to the interpreter's stack
  //
  __ bind(return_from_deopt_common);

  // Result if any is in native abi result (O0..O1/F0..F1). The java expression
  // stack is in the state that the calling convention left it.
  // Copy the result from native abi result and place it on java expression stack.

  // Current interpreter state is present in Lstate

  // Get current pre-pushed top of interpreter stack
  // Any result (if any) is in native abi
  // result type index is in L3_scratch

  __ ld_ptr(STATE(_stack), L1_scratch);                                 // get top of java expr stack

  __ set((intptr_t)CppInterpreter::_tosca_to_stack, L4_scratch);
  __ sll(L3_scratch, LogBytesPerWord, L3_scratch);
  __ ld_ptr(L4_scratch, L3_scratch, Lscratch);                          // get typed result converter address
  __ jmpl(Lscratch, G0, O7);                                            // and convert it
  __ delayed()->nop();
|
|
1328 |
|
|
1329 |
// L1_scratch points to top of stack (prepushed)
|
|
1330 |
__ st_ptr(L1_scratch, STATE(_stack));
|
|
1331 |
}
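
// Illustration only (not part of the generated code): conceptually, _tosca_to_stack is a
// table of converter stubs indexed by result type, and the code emitted above computes
//
//   address converter =
//       CppInterpreter::_tosca_to_stack[AbstractInterpreter::BasicType_as_index(type)];
//   // jump to 'converter' with O7 as the return address; the converter copies the
//   // native-abi result (O0/O1 or F0/F1) onto the java expression stack at L1_scratch.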

// Generate the code to handle a more_monitors message from the c++ interpreter
void CppInterpreterGenerator::generate_more_monitors() {

  Label entry, loop;
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  // 1. compute new pointers                                // esp: old expression stack top
  __ delayed()->ld_ptr(STATE(_stack_base), L4_scratch);     // current expression stack bottom
  __ sub(L4_scratch, entry_size, L4_scratch);
  __ st_ptr(L4_scratch, STATE(_stack_base));

  __ sub(SP, entry_size, SP);                               // Grow stack
  __ st_ptr(SP, STATE(_frame_bottom));

  __ ld_ptr(STATE(_stack_limit), L2_scratch);
  __ sub(L2_scratch, entry_size, L2_scratch);
  __ st_ptr(L2_scratch, STATE(_stack_limit));

  __ ld_ptr(STATE(_stack), L1_scratch);                     // Get current stack top
  __ sub(L1_scratch, entry_size, L1_scratch);
  __ st_ptr(L1_scratch, STATE(_stack));
  __ ba(false, entry);
  __ delayed()->add(L1_scratch, wordSize, L1_scratch);      // first real entry (undo prepush)

  // 2. move expression stack

  __ bind(loop);
  __ st_ptr(L3_scratch, Address(L1_scratch, 0));
  __ add(L1_scratch, wordSize, L1_scratch);
  __ bind(entry);
  __ cmp(L1_scratch, L4_scratch);
  __ br(Assembler::notEqual, false, Assembler::pt, loop);
  __ delayed()->ld_ptr(L1_scratch, entry_size, L3_scratch);

  // now zero the slot so we can find it.
  __ st_ptr(G0, L4_scratch, BasicObjectLock::obj_offset_in_bytes());

}
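
// Illustration only (not generated code): before/after picture of the shuffle performed by
// generate_more_monitors(). One monitor slot of entry_size bytes is opened up at the old
// expression stack base by sliding the expression stack down:
//
//   higher addresses   [ existing monitors          ]
//                      [ new monitor (obj zeroed)   ]  <-- new STATE(_stack_base)
//                      [ expression stack, copied   ]
//                      [   down by entry_size       ]  <-- new STATE(_stack) (prepushed)
//   lower addresses                                    <-- SP / STATE(_frame_bottom), grown by entry_size
//
// _stack_base, _stack_limit, _stack and _frame_bottom all move down by entry_size.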

// Initial entry to C++ interpreter from the call_stub.
// This entry point is called the frame manager since it handles the generation
// of interpreter activation frames via requests directly from the vm (via call_stub)
// and via requests from the interpreter. The requests from the call_stub happen
// directly through the entry point. Requests from the interpreter happen via returning
// from the interpreter and examining the message the interpreter has returned to
// the frame manager. The frame manager can take the following requests:

// NO_REQUEST - error, should never happen.
// MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and
//                 allocate a new monitor.
// CALL_METHOD - setup a new activation to call a new method. Very similar to what
//               happens during entry via the call stub.
// RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub.
//
// Arguments:
//
// ebx: methodOop
// ecx: receiver - unused (retrieved from stack as needed)
// esi: previous frame manager state (NULL from the call_stub/c1/c2)
//
//
// Stack layout at entry
//
// [ return address     ] <--- esp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ]
//
//
// We are free to blow any registers we like because the call_stub which brought us here
// initially has preserved the callee save registers already.
//
//
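// Illustration only: a rough pseudocode sketch of the request loop described above (the
// real control flow is the hand-written assembly emitted below):
//
//   state = <new activation for the initial method>;
//   for (;;) {
//     BytecodeInterpreter::run(state);            // or runWithChecks under JVMTI
//     switch (state->_msg) {
//       case call_method:        state = <new activation for the callee>; continue;
//       case return_from_method: <pop activation; return result to caller or call_stub>;
//       case more_monitors:      <grow the monitor area and resume>;
//       case throwing_exception: <unwind and dispatch or forward the exception>;
//       case do_osr:             <unwind and jump to the OSR nmethod>;
//       default:                 <bad message>;
//     }
//   }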

static address interpreter_frame_manager = NULL;

#ifdef ASSERT
#define VALIDATE_STATE(scratch, marker)                         \
  {                                                             \
    Label skip;                                                 \
    __ ld_ptr(STATE(_self_link), scratch);                      \
    __ cmp(Lstate, scratch);                                    \
    __ brx(Assembler::equal, false, Assembler::pt, skip);       \
    __ delayed()->nop();                                        \
    __ breakpoint_trap();                                       \
    __ emit_long(marker);                                       \
    __ bind(skip);                                              \
  }
#else
#define VALIDATE_STATE(scratch, marker)
#endif /* ASSERT */

void CppInterpreterGenerator::adjust_callers_stack(Register args) {
//
// Adjust caller's stack so that all the locals can be contiguous with
// the parameters.
// Worries about stack overflow make this a pain.
//
// Destroys args, G3_scratch, G3_scratch
// In/Out O5_savedSP (sender's original SP)
//
//  assert_different_registers(state, prev_state);
  const Register Gtmp = G3_scratch;
  const Register tmp = O2;
  const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
  const Address size_of_locals    (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset()));

  __ lduh(size_of_parameters, tmp);
  __ sll(tmp, LogBytesPerWord, Gtmp);       // parameter size in bytes
  __ add(args, Gtmp, Gargs);                // points to first local + BytesPerWord
  // NEW
  __ add(Gargs, -wordSize, Gargs);          // points to first local[0]
  // determine extra space for non-argument locals & adjust caller's SP
  // Gtmp1: parameter size in words
  __ lduh(size_of_locals, Gtmp);
  __ compute_extra_locals_size_in_bytes(tmp, Gtmp, Gtmp);

#if 1
  // c2i adapters place the final interpreter argument in the register save area for O0/I0
  // the call_stub will place the final interpreter argument at
  // frame::memory_parameter_word_sp_offset. This is mostly not noticeable for either asm
  // or c++ interpreter. However with the c++ interpreter when we do a recursive call
  // and try to make it look good in the debugger we will store the argument to
  // RecursiveInterpreterActivation in the register argument save area. Without allocating
  // extra space for the compiler this will overwrite locals in the local array of the
  // interpreter.
  // QQQ still needed with frameless adapters???

  const int c2i_adjust_words = frame::memory_parameter_word_sp_offset - frame::callee_register_argument_save_area_sp_offset;

  __ add(Gtmp, c2i_adjust_words*wordSize, Gtmp);
#endif // 1


  __ sub(SP, Gtmp, SP);      // just caller's frame for the additional space we need.
}
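
// Illustration only (not generated code): after adjust_callers_stack() the incoming
// parameters (already in the caller's frame) and the extra, non-parameter locals form one
// contiguous locals array, which is what the interpreter requires:
//
//   Gargs -> [ local 0   == parameter 0    ]   (highest address of the locals array)
//            [ ...                         ]
//            [ local n-1 == parameter n-1  ]
//            [ non-parameter locals ...    ]   <- space created by  __ sub(SP, Gtmp, SP)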

address InterpreterGenerator::generate_normal_entry(bool synchronized) {

  // G5_method: methodOop
  // G2_thread: thread (unused)
  // Gargs:   bottom of args (sender_sp)
  // O5: sender's sp

  // A single frame manager is plenty as we don't specialize for synchronized. We could and
  // the code is pretty much ready. Would need to change the test below and for good measure
  // modify generate_interpreter_state to only do the (pre) sync stuff for synchronized
  // routines. Not clear this is worth it yet.

  if (interpreter_frame_manager) {
    return interpreter_frame_manager;
  }

  __ bind(frame_manager_entry);

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  // const Register Lmirror = L1;     // native mirror (native calls only)

  const Address constants         (G5_method, 0, in_bytes(methodOopDesc::constants_offset()));
  const Address access_flags      (G5_method, 0, in_bytes(methodOopDesc::access_flags_offset()));
  const Address size_of_parameters(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset()));
  const Address max_stack         (G5_method, 0, in_bytes(methodOopDesc::max_stack_offset()));
  const Address size_of_locals    (G5_method, 0, in_bytes(methodOopDesc::size_of_locals_offset()));

  address entry_point = __ pc();
  __ mov(G0, prevState);                                 // no current activation


  Label re_dispatch;

  __ bind(re_dispatch);

  // Interpreter needs to have locals completely contiguous. In order to do that
  // we must adjust the caller's stack pointer for any locals beyond just the
  // parameters
  adjust_callers_stack(Gargs);

  // O5_savedSP still contains sender's sp

  // NEW FRAME

  generate_compute_interpreter_state(Lstate, prevState, false);

  // At this point a new interpreter frame and state object are created and initialized
  // Lstate has the pointer to the new activation
  // Any stack banging or limit check should already be done.

  Label call_interpreter;

  __ bind(call_interpreter);


#if 1
  __ set(0xdead002, Lmirror);
  __ set(0xdead002, L2_scratch);
  __ set(0xdead003, L3_scratch);
  __ set(0xdead004, L4_scratch);
  __ set(0xdead005, Lscratch);
  __ set(0xdead006, Lscratch2);
  __ set(0xdead007, L7_scratch);

  __ set(0xdeaf002, O2);
  __ set(0xdeaf003, O3);
  __ set(0xdeaf004, O4);
  __ set(0xdeaf005, O5);
#endif

  // Call interpreter (stack bang complete) enter here if message is
  // set and we know stack size is valid

  Label call_interpreter_2;

  __ bind(call_interpreter_2);

#ifdef ASSERT
  {
    Label skip;
    __ ld_ptr(STATE(_frame_bottom), G3_scratch);
    __ cmp(G3_scratch, SP);
    __ brx(Assembler::equal, false, Assembler::pt, skip);
    __ delayed()->nop();
    __ stop("SP not restored to frame bottom");
    __ bind(skip);
  }
#endif

  VALIDATE_STATE(G3_scratch, 4);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Lstate, O0);        // (arg) pointer to current state

  __ call(CAST_FROM_FN_PTR(address,
                           JvmtiExport::can_post_interpreter_events() ?
                              BytecodeInterpreter::runWithChecks
                            : BytecodeInterpreter::run),
          relocInfo::runtime_call_type);

  __ delayed()->nop();

  __ ld_ptr(STATE(_thread), G2_thread);
  __ reset_last_Java_frame();

  // examine msg from interpreter to determine next action
  __ ld_ptr(STATE(_thread), G2_thread);                            // restore G2_thread

  __ ld(STATE(_msg), L1_scratch);                                  // Get new message

  Label call_method;
  Label return_from_interpreted_method;
  Label throw_exception;
  Label do_OSR;
  Label bad_msg;
  Label resume_interpreter;

  __ cmp(L1_scratch, (int)BytecodeInterpreter::call_method);
  __ br(Assembler::equal, false, Assembler::pt, call_method);
  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::return_from_method);
  __ br(Assembler::equal, false, Assembler::pt, return_from_interpreted_method);
  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::throwing_exception);
  __ br(Assembler::equal, false, Assembler::pt, throw_exception);
  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::do_osr);
  __ br(Assembler::equal, false, Assembler::pt, do_OSR);
  __ delayed()->cmp(L1_scratch, (int)BytecodeInterpreter::more_monitors);
  __ br(Assembler::notEqual, false, Assembler::pt, bad_msg);
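
  // Illustration only (not generated code): the branch chain above is the SPARC encoding
  // of a simple switch on the interpreter's message, with each delay slot pre-loading the
  // compare for the next case:
  //
  //   switch (msg) {
  //     case call_method:          goto call_method;
  //     case return_from_method:   goto return_from_interpreted_method;
  //     case throwing_exception:   goto throw_exception;
  //     case do_osr:               goto do_OSR;
  //     case more_monitors:        break;       // fall through to the code below
  //     default:                   goto bad_msg;
  //   }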

  // Allocate more monitor space, shuffle expression stack....

  generate_more_monitors();

  // new monitor slot allocated, resume the interpreter.

  __ set((int)BytecodeInterpreter::got_monitors, L1_scratch);
  VALIDATE_STATE(G3_scratch, 5);
  __ ba(false, call_interpreter);
  __ delayed()->st(L1_scratch, STATE(_msg));

  // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode)
  unctrap_frame_manager_entry  = __ pc();

  // QQQ what message do we send

  __ ba(false, call_interpreter);
  __ delayed()->ld_ptr(STATE(_frame_bottom), SP);                  // restore to full stack frame

  //=============================================================================
  // Returning from a compiled method into a deopted method. The bytecode at the
  // bcp has completed. The result of the bytecode is in the native abi (the tosca
  // for the template based interpreter). Any stack space that was used by the
  // bytecode that has completed has been removed (e.g. parameters for an invoke)
  // so all that we have to do is place any pending result on the expression stack
  // and resume execution on the next bytecode.

  generate_deopt_handling();

  // ready to resume the interpreter

  __ set((int)BytecodeInterpreter::deopt_resume, L1_scratch);
  __ ba(false, call_interpreter);
  __ delayed()->st(L1_scratch, STATE(_msg));

  // Current frame has caught an exception we need to dispatch to the
  // handler. We can get here because a native interpreter frame caught
  // an exception in which case there is no handler and we must rethrow.
  // If it is a vanilla interpreted frame then we simply drop into the
  // interpreter and let it do the lookup.

  Interpreter::_rethrow_exception_entry = __ pc();

  Label return_with_exception;
  Label unwind_and_forward;

  // O0: exception
  // O7: throwing pc

  // We want exception in the thread no matter what we ultimately decide about frame type.

  Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
  __ verify_thread();
  __ st_ptr(O0, exception_addr);

  // get the methodOop
  __ ld_ptr(STATE(_method), G5_method);

  // is this current frame vanilla or native?

  __ ld(access_flags, Gtmp1);
  __ btst(JVM_ACC_NATIVE, Gtmp1);
  __ br(Assembler::zero, false, Assembler::pt, return_with_exception);  // vanilla interpreted frame, handle directly
  __ delayed()->nop();

  // We drop through to unwind a native interpreted frame with a pending exception
  // We jump here for the initial interpreter frame with exception pending
  // We unwind the current activation and forward it to our caller.

  __ bind(unwind_and_forward);

  // Unwind frame and jump to forward exception. unwinding will place throwing pc in O7
  // as expected by forward_exception.

  __ restore(FP, G0, SP);                  // unwind interpreter state frame
  __ br(Assembler::always, false, Assembler::pt, StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  __ delayed()->mov(I5_savedSP->after_restore(), SP);

  // Return point from a call which returns a result in the native abi
  // (c1/c2/jni-native). This result must be processed onto the java
  // expression stack.
  //
  // A pending exception may be present in which case there is no result present

  address return_from_native_method = __ pc();

  VALIDATE_STATE(G3_scratch, 6);

  // Result if any is in native abi result (O0..O1/F0..F1). The java expression
  // stack is in the state that the calling convention left it.
  // Copy the result from native abi result and place it on java expression stack.

  // Current interpreter state is present in Lstate

  // Exception pending?

  __ ld_ptr(STATE(_frame_bottom), SP);                             // restore to full stack frame
  __ ld_ptr(exception_addr, Lscratch);                             // get any pending exception
  __ tst(Lscratch);                                                // exception pending?
  __ brx(Assembler::notZero, false, Assembler::pt, return_with_exception);
  __ delayed()->nop();

  // Process the native abi result to java expression stack

  __ ld_ptr(STATE(_result._to_call._callee), L4_scratch);                        // called method
  __ ld_ptr(STATE(_stack), L1_scratch);                                          // get top of java expr stack
  __ lduh(L4_scratch, in_bytes(methodOopDesc::size_of_parameters_offset()), L2_scratch); // get parameter size
  __ sll(L2_scratch, LogBytesPerWord, L2_scratch );                              // parameter size in bytes
  __ add(L1_scratch, L2_scratch, L1_scratch);                                    // stack destination for result
  __ ld(L4_scratch, in_bytes(methodOopDesc::result_index_offset()), L3_scratch); // called method result type index

  // tosca is really just native abi
  __ set((intptr_t)CppInterpreter::_tosca_to_stack, L4_scratch);
  __ sll(L3_scratch, LogBytesPerWord, L3_scratch);
  __ ld_ptr(L4_scratch, L3_scratch, Lscratch);                     // get typed result converter address
  __ jmpl(Lscratch, G0, O7);                                       // and convert it
  __ delayed()->nop();

  // L1_scratch points to top of stack (prepushed)

  __ ba(false, resume_interpreter);
  __ delayed()->mov(L1_scratch, O1);

  // An exception is being caught on return to a vanilla interpreter frame.
  // Empty the stack and resume interpreter

  __ bind(return_with_exception);

  __ ld_ptr(STATE(_frame_bottom), SP);                             // restore to full stack frame
  __ ld_ptr(STATE(_stack_base), O1);                               // empty java expression stack
  __ ba(false, resume_interpreter);
  __ delayed()->sub(O1, wordSize, O1);                             // account for prepush

  // Return from interpreted method: we return a result appropriate to the caller (i.e. "recursive"
  // interpreter call, or native) and unwind this interpreter activation.
  // All monitors should be unlocked.

  __ bind(return_from_interpreted_method);

  VALIDATE_STATE(G3_scratch, 7);

  Label return_to_initial_caller;

  // Interpreted result is on the top of the completed activation expression stack.
  // We must return it to the top of the callers stack if caller was interpreted
  // otherwise we convert to native abi result and return to call_stub/c1/c2
  // The caller's expression stack was truncated by the call however the current activation
  // has enough stuff on the stack that we have usable space there no matter what. The
  // other thing that makes it easy is that the top of the caller's stack is stored in STATE(_locals)
  // for the current activation

  __ ld_ptr(STATE(_prev_link), L1_scratch);
  __ ld_ptr(STATE(_method), L2_scratch);                           // get method just executed
  __ ld(L2_scratch, in_bytes(methodOopDesc::result_index_offset()), L2_scratch);
  __ tst(L1_scratch);
  __ brx(Assembler::zero, false, Assembler::pt, return_to_initial_caller);
  __ delayed()->sll(L2_scratch, LogBytesPerWord, L2_scratch);

  // Copy result to callers java stack

  __ set((intptr_t)CppInterpreter::_stack_to_stack, L4_scratch);
  __ ld_ptr(L4_scratch, L2_scratch, Lscratch);                     // get typed result converter address
  __ ld_ptr(STATE(_stack), O0);                                    // current top (prepushed)
  __ ld_ptr(STATE(_locals), O1);                                   // stack destination

  // O0 - will be source, O1 - will be destination (preserved)
  __ jmpl(Lscratch, G0, O7);                                       // and convert it
  __ delayed()->add(O0, wordSize, O0);                             // get source (top of current expr stack)

  // O1 == &locals[0]

  // Result is now on caller's stack. Just unwind current activation and resume

  Label unwind_recursive_activation;


  __ bind(unwind_recursive_activation);

  // O1 == &locals[0] (really callers stacktop) for activation now returning
  // returning to interpreter method from "recursive" interpreter call
  // result converter left O1 pointing to top of the (prepushed) java stack for method we are returning
  // to. Now all we must do is unwind the state from the completed call

  // Must restore stack
  VALIDATE_STATE(G3_scratch, 8);

  // Return to interpreter method after a method call (interpreted/native/c1/c2) has completed.
  // Result if any is already on the caller's stack. All we must do now is remove the now dead
  // frame and tell interpreter to resume.


  __ mov(O1, I1);                                                  // pass back new stack top across activation
  // POP FRAME HERE ==================================
  __ restore(FP, G0, SP);                                          // unwind interpreter state frame
  __ ld_ptr(STATE(_frame_bottom), SP);                             // restore to full stack frame


  // Resume the interpreter. The current frame contains the current interpreter
  // state object.
  //
  // O1 == new java stack pointer

  __ bind(resume_interpreter);
  VALIDATE_STATE(G3_scratch, 10);

  // A frame we have already used before so no need to bang stack so use call_interpreter_2 entry

  __ set((int)BytecodeInterpreter::method_resume, L1_scratch);
  __ st(L1_scratch, STATE(_msg));
  __ ba(false, call_interpreter_2);
  __ delayed()->st_ptr(O1, STATE(_stack));


  // Fast accessor methods share this entry point.
  // This works because frame manager is in the same codelet
  // This can either be an entry via call_stub/c1/c2 or a recursive interpreter call
  // we need to do a little register fixup here once we distinguish the two of them
  if (UseFastAccessorMethods && !synchronized) {
    // Call stub_return address still in O7
    __ bind(fast_accessor_slow_entry_path);
    __ set((intptr_t)return_from_native_method - 8, Gtmp1);
    __ cmp(Gtmp1, O7);            // returning to interpreter?
    __ brx(Assembler::equal, true, Assembler::pt, re_dispatch); // yep
    __ delayed()->nop();
    __ ba(false, re_dispatch);
    __ delayed()->mov(G0, prevState); // initial entry

  }

  // interpreter returning to native code (call_stub/c1/c2)
  // convert result and unwind initial activation
  // L2_scratch - scaled result type index

  __ bind(return_to_initial_caller);

  __ set((intptr_t)CppInterpreter::_stack_to_native_abi, L4_scratch);
  __ ld_ptr(L4_scratch, L2_scratch, Lscratch);                     // get typed result converter address
  __ ld_ptr(STATE(_stack), O0);                                    // current top (prepushed)
  __ jmpl(Lscratch, G0, O7);                                       // and convert it
  __ delayed()->add(O0, wordSize, O0);                             // get source (top of current expr stack)

  Label unwind_initial_activation;
  __ bind(unwind_initial_activation);

  // RETURN TO CALL_STUB/C1/C2 code (result if any in I0..I1/(F0/..F1)
  // we can return here with an exception that wasn't handled by interpreted code
  // how does c1/c2 see it on return?

  // compute resulting sp before/after args popped depending upon calling convention
  // __ ld_ptr(STATE(_saved_sp), Gtmp1);
  //
  // POP FRAME HERE ==================================
  __ restore(FP, G0, SP);
  __ retl();
  __ delayed()->mov(I5_savedSP->after_restore(), SP);

  // OSR request, unwind the current frame and transfer to the OSR entry
  // and enter OSR nmethod

  __ bind(do_OSR);
  Label remove_initial_frame;
  __ ld_ptr(STATE(_prev_link), L1_scratch);
  __ ld_ptr(STATE(_result._osr._osr_buf), G1_scratch);

  // We are going to pop this frame. Is there another interpreter frame underneath
  // it or is it callstub/compiled?

  __ tst(L1_scratch);
  __ brx(Assembler::zero, false, Assembler::pt, remove_initial_frame);
  __ delayed()->ld_ptr(STATE(_result._osr._osr_entry), G3_scratch);

  // Frame underneath is an interpreter frame simply unwind
  // POP FRAME HERE ==================================
  __ restore(FP, G0, SP); // unwind interpreter state frame
  __ mov(I5_savedSP->after_restore(), SP);

  // Since we are now calling native need to change our "return address" from the
  // dummy RecursiveInterpreterActivation to a return from native

  __ set((intptr_t)return_from_native_method - 8, O7);

  __ jmpl(G3_scratch, G0, G0);
  __ delayed()->mov(G1_scratch, O0);

  __ bind(remove_initial_frame);

  // POP FRAME HERE ==================================
  __ restore(FP, G0, SP);
  __ mov(I5_savedSP->after_restore(), SP);
  __ jmpl(G3_scratch, G0, G0);
  __ delayed()->mov(G1_scratch, O0);

  // Call a new method. All we do is (temporarily) trim the expression stack
  // push a return address to bring us back to here and leap to the new entry.
  // At this point we have a topmost frame that was allocated by the frame manager
  // which contains the current method interpreted state. We trim this frame
  // of excess java expression stack entries and then recurse.

  __ bind(call_method);

  // stack points to next free location and not top element on expression stack
  // method expects sp to be pointing to topmost element

  __ ld_ptr(STATE(_thread), G2_thread);
  __ ld_ptr(STATE(_result._to_call._callee), G5_method);


  // SP already takes into account the 2 extra words we use for slop
  // when we call a "static long no_params()" method. So if
  // we trim back sp by the amount of unused java expression stack
  // there will be automagically the 2 extra words we need.
  // We also have to worry about keeping SP aligned.

  __ ld_ptr(STATE(_stack), Gargs);
  __ ld_ptr(STATE(_stack_limit), L1_scratch);

  // compute the unused java stack size
  __ sub(Gargs, L1_scratch, L2_scratch);                           // compute unused space

  // Round down the unused space so that the stack is always 16-byte aligned
  // by making the unused space a multiple of the size of two longs.

  __ and3(L2_scratch, -2*BytesPerLong, L2_scratch);
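
  // Illustrative arithmetic only (not generated code): -2*BytesPerLong is -16, so the and3
  // above clears the low four bits of the unused-space byte count; e.g. an unused space of
  // 0x3c bytes is rounded down to 0x30. Adding that rounded value to SP below trims the
  // frame while keeping SP 16-byte aligned, and never trims more than the space that was
  // actually unused.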

  // Now trim the stack
  __ add(SP, L2_scratch, SP);


  // Now point to the final argument (account for prepush)
  __ add(Gargs, wordSize, Gargs);
#ifdef ASSERT
  // Make sure we have space for the window
  __ sub(Gargs, SP, L1_scratch);
  __ cmp(L1_scratch, 16*wordSize);
  {
    Label skip;
    __ brx(Assembler::greaterEqual, false, Assembler::pt, skip);
    __ delayed()->nop();
    __ stop("killed stack");
    __ bind(skip);
  }
#endif // ASSERT

  // Create a new frame where we can store values that make it look like the interpreter
  // really recursed.

  // prepare to recurse or call specialized entry

  // First link the registers we need

  // make the pc look good in debugger
  __ set(CAST_FROM_FN_PTR(intptr_t, RecursiveInterpreterActivation), O7);
  // argument too
  __ mov(Lstate, I0);

  // Record our sending SP
  __ mov(SP, O5_savedSP);

  __ ld_ptr(STATE(_result._to_call._callee_entry_point), L2_scratch);
  __ set((intptr_t) entry_point, L1_scratch);
  __ cmp(L1_scratch, L2_scratch);
  __ brx(Assembler::equal, false, Assembler::pt, re_dispatch);
  __ delayed()->mov(Lstate, prevState);                            // link activations

  // method uses specialized entry, push a return so we look like call stub setup
  // this path will handle fact that result is returned in registers and not
  // on the java stack.

  __ set((intptr_t)return_from_native_method - 8, O7);
  __ jmpl(L2_scratch, G0, G0);                                     // Do specialized entry
  __ delayed()->nop();

  //
  // Bad Message from interpreter
  //
  __ bind(bad_msg);
  __ stop("Bad message from interpreter");

  // Interpreted method "returned" with an exception, pass it on...
  // Pass result, unwind activation and continue/return to interpreter/call_stub
  // We handle result (if any) differently based on return to interpreter or call_stub

  __ bind(throw_exception);
  __ ld_ptr(STATE(_prev_link), L1_scratch);
  __ tst(L1_scratch);
  __ brx(Assembler::zero, false, Assembler::pt, unwind_and_forward);
  __ delayed()->nop();

  __ ld_ptr(STATE(_locals), O1); // get result of popping callee's args
  __ ba(false, unwind_recursive_activation);
  __ delayed()->nop();

  interpreter_frame_manager = entry_point;
  return entry_point;
}

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
 : CppInterpreterGenerator(code) {
   generate_all(); // down here so it can be "virtual"
}


static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

  // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
  // expression stack, the callee will have callee_extra_locals (so we can account for
  // frame extension) and monitor_size for monitors. Basically we need to calculate
  // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
  //
  //
  // The big complicating thing here is that we must ensure that the stack stays properly
  // aligned. This would be even uglier if monitor size wasn't modulo what the stack
  // needs to be aligned for. We are given that the sp (fp) is already aligned by
  // the caller so we must ensure that it is properly aligned for our callee.
  //
  // The c++ interpreter always makes sure that we have enough extra space on the
  // stack at all times to deal with the "static long no_params()" method issue. This
  // is "slop_factor" here.
  const int slop_factor = 2;

  const int fixed_size = sizeof(BytecodeInterpreter)/wordSize +    // interpreter state object
                         frame::memory_parameter_word_sp_offset;   // register save area + param window
  return (round_to(max_stack +
                   slop_factor +
                   fixed_size +
                   monitor_size +
                   (callee_extra_locals * Interpreter::stackElementWords()), WordsPerLong));

}
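
// A purely hypothetical worked example of the formula above (illustration only; the real
// numbers depend on the platform -- this assumes a 32-bit build where WordsPerLong == 2,
// a fixed_size of 80 words, and single-word stack elements). With max_stack = 10,
// slop_factor = 2, monitor_size = 2 and callee_extra_locals = 3, the helper returns
// round_to(10 + 2 + 80 + 2 + 3, 2) = round_to(97, 2) = 98 words.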

int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {

  // See call_stub code
  int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
                                 WordsPerLong);    // 7 + register save area

  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size    = method->is_synchronized() ?
                                1*frame::interpreter_frame_monitor_size() : 0;
  return size_activation_helper(method->max_locals(), method->max_stack(),
                                monitor_size) + call_stub_size;
}

void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
                                                  frame* caller,
                                                  frame* current,
                                                  methodOop method,
                                                  intptr_t* locals,
                                                  intptr_t* stack,
                                                  intptr_t* stack_base,
                                                  intptr_t* monitor_base,
                                                  intptr_t* frame_bottom,
                                                  bool is_top_frame
                                                  )
{
  // What about any vtable?
  //
  to_fill->_thread = JavaThread::current();
  // This gets filled in later but make it something recognizable for now
  to_fill->_bcp = method->code_base();
  to_fill->_locals = locals;
  to_fill->_constants = method->constants()->cache();
  to_fill->_method = method;
  to_fill->_mdx = NULL;
  to_fill->_stack = stack;
  if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution() ) {
    to_fill->_msg = deopt_resume2;
  } else {
    to_fill->_msg = method_resume;
  }
  to_fill->_result._to_call._bcp_advance = 0;
  to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
  to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
  to_fill->_prev_link = NULL;

  // Fill in the registers for the frame

  // Need to install _sender_sp. Actually not too hard in C++!
  // When the skeletal frames are laid out we fill in a value
  // for _sender_sp. That value is only correct for the oldest
  // skeletal frame constructed (because there is only a single
  // entry for "caller_adjustment". While the skeletal frames
  // exist that is good enough. We correct that calculation
  // here and get all the frames correct.

  // to_fill->_sender_sp = locals - (method->size_of_parameters() - 1);

  *current->register_addr(Lstate) = (intptr_t) to_fill;
  // skeletal already places a useful value here and this doesn't account
  // for alignment so don't bother.
  // *current->register_addr(I5_savedSP) = (intptr_t) locals - (method->size_of_parameters() - 1);

  if (caller->is_interpreted_frame()) {
    interpreterState prev  = caller->get_interpreterState();
    to_fill->_prev_link = prev;
    // Make the prev callee look proper
    prev->_result._to_call._callee = method;
    if (*prev->_bcp == Bytecodes::_invokeinterface) {
      prev->_result._to_call._bcp_advance = 5;
    } else {
      prev->_result._to_call._bcp_advance = 3;
    }
  }
  to_fill->_oop_temp = NULL;
  to_fill->_stack_base = stack_base;
  // Need +1 here because stack_base points to the word just above the first expr stack entry
  // and stack_limit is supposed to point to the word just below the last expr stack entry.
  // See generate_compute_interpreter_state.
  to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
  to_fill->_monitor_base = (BasicObjectLock*) monitor_base;

  // sparc specific
  to_fill->_frame_bottom = frame_bottom;
  to_fill->_self_link = to_fill;
#ifdef ASSERT
  to_fill->_native_fresult = 123456.789;
  to_fill->_native_lresult = CONST64(0xdeadcafedeafcafe);
#endif
}
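
// Illustration only: a summary of the pointers layout_interpreterState() is handed, with
// addresses decreasing downward:
//
//   monitor_base -> first word past the monitor area (the BytecodeInterpreter state sits above it)
//   stack_base   -> word just above the first expression stack entry
//   stack        -> current (prepushed) top of the expression stack
//   stack_limit  -> word just below the deepest possible entry, i.e. stack_base - (max_stack + 1)
//   frame_bottom -> lowest word of the activation (the interpreter frame's SP)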

void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp) {
  istate->_last_Java_pc = (intptr_t*) last_Java_pc;
}


int AbstractInterpreter::layout_activation(methodOop method,
                                           int tempcount, // Number of slots on java expression stack in use
                                           int popframe_extra_args,
                                           int moncount,  // Number of active monitors
                                           int callee_param_size,
                                           int callee_locals_size,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame) {

  assert(popframe_extra_args == 0, "NEED TO FIX");
  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
  // does as far as allocating an interpreter frame.
  // If interpreter_frame!=NULL, set up the method, locals, and monitors.
  // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
  // as determined by a previous call to this method.
  // It is also guaranteed to be walkable even though it is in a skeletal state
  // NOTE: return size is in words not bytes
  // NOTE: tempcount is the current size of the java expression stack. For top most
  // frames we will allocate a full sized expression stack and not the cut-back
  // version that non-top frames have.

  // Calculate the amount our frame will be adjusted by the callee. For top frame
  // this is zero.

  // NOTE: ia64 seems to do this wrong (or at least backwards) in that it
  // calculates the extra locals based on itself. Not what the callee does
  // to it. So it ignores last_frame_adjust value. Seems suspicious as far
  // as getting sender_sp correct.

  int extra_locals_size = callee_locals_size - callee_param_size;
  int monitor_size = (sizeof(BasicObjectLock) * moncount) / wordSize;
  int full_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
  int short_frame_words = size_activation_helper(extra_locals_size, method->max_stack(), monitor_size);
  int frame_words = is_top_frame ? full_frame_words : short_frame_words;


  /*
    if we actually have a frame to lay out we must now fill in all the pieces. This means both
    the interpreterState and the registers.
  */
  if (interpreter_frame != NULL) {

    // MUCHO HACK

    intptr_t* frame_bottom = interpreter_frame->sp() - (full_frame_words - frame_words);
    // 'interpreter_frame->sp()' is unbiased while 'frame_bottom' must be a biased value in 64bit mode.
    assert(((intptr_t)frame_bottom & 0xf) == 0, "SP biased in layout_activation");
    frame_bottom = (intptr_t*)((intptr_t)frame_bottom - STACK_BIAS);

    /* Now fill in the interpreterState object */

    interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));


    intptr_t* locals;

    // Calculate the position of locals[0]. This is painful because of
    // stack alignment (same as ia64). The problem is that we can
    // not compute the location of locals from fp(). fp() will account
    // for the extra locals but it also accounts for aligning the stack
    // and we can't determine if the locals[0] was misaligned but max_locals
    // was enough to have the
    // calculate position of locals. fp already accounts for extra locals.
    // +2 for the static long no_params() issue.

    if (caller->is_interpreted_frame()) {
      // locals must agree with the caller because it will be used to set the
      // caller's tos when we return.
      interpreterState prev  = caller->get_interpreterState();
      // stack() is prepushed.
      locals = prev->stack() + method->size_of_parameters();
    } else {
      // Lay out locals block in the caller adjacent to the register window save area.
      //
      // Compiled frames do not allocate a varargs area which is why this if
      // statement is needed.
      //
      intptr_t* fp = interpreter_frame->fp();
      int local_words = method->max_locals() * Interpreter::stackElementWords();

      if (caller->is_compiled_frame()) {
        locals = fp + frame::register_save_words + local_words - 1;
      } else {
        locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
      }

    }
    // END MUCHO HACK

    intptr_t* monitor_base = (intptr_t*) cur_state;
    intptr_t* stack_base =  monitor_base - monitor_size;
    /* +1 because stack is always prepushed */
    intptr_t* stack = stack_base - (tempcount + 1);


    BytecodeInterpreter::layout_interpreterState(cur_state,
                                                 caller,
                                                 interpreter_frame,
                                                 method,
                                                 locals,
                                                 stack,
                                                 stack_base,
                                                 monitor_base,
                                                 frame_bottom,
                                                 is_top_frame);

    BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());

  }
  return frame_words;
}
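
// Illustration only: a hedged sketch of the two-pass way this routine is expected to be
// driven, based on the comments above (the actual caller lives elsewhere in the VM):
//
//   // pass 1: interpreter_frame == NULL, only the returned size (in words) is used
//   int words = AbstractInterpreter::layout_activation(m, tempcount, 0, moncount,
//                                                      callee_params, callee_locals,
//                                                      &caller_frame, NULL, is_top);
//   // pass 2: a skeletal frame of exactly 'words' words exists; now fill it in
//   AbstractInterpreter::layout_activation(m, tempcount, 0, moncount,
//                                          callee_params, callee_locals,
//                                          &caller_frame, &skeletal_frame, is_top);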

#endif // CC_INTERP