 * questions.
 *
 */

#include "precompiled.hpp"
#ifndef CC_INTERP
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/constMethod.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}

//-----------------------------------------------------------------------------

// Actually we should never reach here since we do stack overflow checks before pushing any frame.
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  __ unimplemented("generate_StackOverflowError_handler");
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  __ empty_expression_stack();
  __ load_const_optimized(R4_ARG2, (address) name);
  // Index is in R17_tos.
  __ mr(R5_ARG3, R17_tos);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
  return entry;
}

#if 0
// Call special ClassCastException constructor taking object to cast
// and target class as arguments.
address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
  address entry = __ pc();

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Thread will be loaded to R3_ARG1.
  // Target class oop is in register R5_ARG3 by convention!
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
  // Above call must not return here since exception pending.
  DEBUG_ONLY(__ should_not_reach_here();)
  return entry;
}
#endif

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Load exception object.
  // Thread will be loaded to R3_ARG1.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
#ifdef ASSERT
  // Above call must not return here since exception pending.
  __ should_not_reach_here();
#endif
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  address entry = __ pc();
  //__ untested("generate_exception_handler_common");
  Register Rexception = R17_tos;

  // Expression stack must be empty before entering the VM if an exception happened.
  __ empty_expression_stack();

  __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
  if (pass_oop) {
    __ mr(R5_ARG3, Rexception);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
  } else {
    __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
  }

  // Throw exception.
  __ mr(R3_ARG1, Rexception);
  __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
  __ mtctr(R11_scratch1);
  __ bctr();

  return entry;
}
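
// Usage sketch (illustrative, based on the shared TemplateInterpreter setup
// code, not on anything in this file): the platform-independent generator
// derives the concrete exception entries from this helper, roughly as
//   Interpreter::_throw_NullPointerException_entry =
//       generate_exception_handler("java/lang/NullPointerException", NULL);
// with pass_oop == false, while klass-carrying exceptions take the
// pass_oop == true path through InterpreterRuntime::create_klass_exception.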

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ unimplemented("generate_continuation_for");
  return entry;
}

// This entry is returned to when a call returns to the interpreter.
// When we arrive here, we expect that the callee stack frame is already popped.
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Move the value out of the return register back to the TOS cache of current frame.
  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R17_tos, R3_RET); break;   // RET -> TOS cache
    case ftos:
    case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
    case vtos: break;                           // Nothing to do, this was a void return.
    default  : ShouldNotReachHere();
  }

  __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);

  // Compiled code destroys templateTableBase, reload.
  __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);

  if (state == atos) {
    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
  }

  const Register cache = R11_scratch1;
  const Register size  = R12_scratch2;
  __ get_cache_and_index_at_bcp(cache, 1, index_size);

  // Get least significant byte of 64 bit value:
#if defined(VM_LITTLE_ENDIAN)
  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
#else
  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
#endif
  __ sldi(size, size, Interpreter::logStackElementSize);
  __ add(R15_esp, R15_esp, size);
  __ dispatch_next(state, step);
  return entry;
}
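
// Note on the parameter-size load in generate_return_entry_for above
// (explanatory, not from the original source): the least significant byte of
// the ConstantPoolCacheEntry flags word holds the callee's parameter size in
// stack slots. For example, assuming a static callee taking (long, int), size
// is 3 slots, so the sldi/add pair pops 3 * 8 = 24 bytes of arguments off the
// expression stack before dispatching to the next bytecode.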

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  // If state != vtos, we're returning from a native method, which put its result
  // into the result register. So move the value out of the return register back
  // to the TOS cache of current frame.

  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R17_tos, R3_RET); break;   // GR_RET -> TOS cache
    case ftos:
    case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
    case vtos: break;                           // Nothing to do, this was a void return.
    default  : ShouldNotReachHere();
  }

  // Load LcpoolCache @@@ should be already set!
  __ get_constant_pool_cache(R27_constPoolCache);

  // Handle a pending exception, fall through if none.
  __ check_and_forward_exception(R11_scratch1, R12_scratch2);

  // Start executing bytecodes.
  __ dispatch_next(state, step);

  return entry;
}

// A result handler converts the native result into java format.
// Use the shared code between c++ and template interpreter.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  return AbstractInterpreterGenerator::generate_result_handler_for(type);
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();

  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));

  return entry;
}

// Helpers for commoning out cases in the various types of method entries.

// Increment invocation count & check for overflow.
//
// Note: checking for negative value instead of overflow
// so we have a 'sticky' overflow test.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in method or in MDO depending on whether we're profiling or not.
  Register Rscratch1   = R11_scratch1;
  Register Rscratch2   = R12_scratch2;
  Register R3_counters = R3_ARG1;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      const Register Rmdo = Rscratch1;
      // If no method data exists, go to profile_continue.
      __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
      __ cmpdi(CCR0, Rmdo, 0);
      __ beq(CCR0, no_mdo);

      // Increment backedge counter in the MDO.
      const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mdo_bc_offs, Rmdo);
      __ load_const_optimized(Rscratch1, mask, R0);
      __ and_(Rscratch1, Rscratch2, Rscratch1);
      __ bne(CCR0, done);
      __ b(*overflow);
    }

    // Increment counter in MethodCounters*.
    const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
    __ bind(no_mdo);
    __ get_method_counters(R19_method, R3_counters, done);
    __ lwz(Rscratch2, mo_bc_offs, R3_counters);
    __ addi(Rscratch2, Rscratch2, increment);
    __ stw(Rscratch2, mo_bc_offs, R3_counters);
    __ load_const_optimized(Rscratch1, mask, R0);
    __ and_(Rscratch1, Rscratch2, Rscratch1);
    __ beq(CCR0, *overflow);

    __ bind(done);

  } else {

    // Update standard invocation counters.
    Register Rsum_ivc_bec = R4_ARG2;
    __ get_method_counters(R19_method, R3_counters, done);
    __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
    // Increment interpreter invocation counter.
    if (ProfileInterpreter) { // %%% Merge this into methodDataOop.
      __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
      __ addi(R12_scratch2, R12_scratch2, 1);
      __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
    }
    // Check if we must create a method data obj.
    if (ProfileInterpreter && profile_method != NULL) {
      const Register profile_limit = Rscratch1;
      int pl_offs = __ load_const_optimized(profile_limit, &InvocationCounter::InterpreterProfileLimit, R0, true);
      __ lwz(profile_limit, pl_offs, profile_limit);
      // Test to see if we should create a method data oop.
      __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
      __ blt(CCR0, *profile_method_continue);
      // If no method data exists, go to profile_method.
      __ test_method_data_pointer(*profile_method);
    }
    // Finally check for counter overflow.
    if (overflow) {
      const Register invocation_limit = Rscratch1;
      int il_offs = __ load_const_optimized(invocation_limit, &InvocationCounter::InterpreterInvocationLimit, R0, true);
      __ lwz(invocation_limit, il_offs, invocation_limit);
      assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit), "unexpected field size");
      __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
      __ bge(CCR0, *overflow);
    }

    __ bind(done);
  }
}
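
// Worked example for the TieredCompilation path above (illustrative, assuming
// the default Tier0InvokeNotifyFreqLog == 7 and an InvocationCounter layout
// with count_shift == 3): mask == ((1 << 7) - 1) << 3 == 0x3f8, and each
// increment adds count_increment == 8, so the and_ produces zero (and we
// branch to the overflow path) once every 2^7 == 128 invocations.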

// Generate code to initiate compilation on invocation counter overflow.
void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes one argument,
  // which indicates if the counter overflow occurs at a backwards branch (NULL bcp).
  // We pass zero in.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  //
  // Unlike the C++ interpreter above: Check exceptions!
  // Assumption: Caller must set the flag "_do_not_unlock_if_synchronized" if the monitor of a sync'ed
  // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.

  __ li(R4_ARG2, 0);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);

  // Returns verified_entry_point or NULL.
  // We ignore it in any case.
  __ b(continue_entry);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
  assert_different_registers(Rmem_frame_size, Rscratch1);
  __ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
}

void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
  __ unlock_object(R26_monitor, check_exceptions);
}

// Lock the current method, interpreter register window must be set up!
void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
  const Register Robj_to_lock = Rscratch2;

  {
    if (!flags_preloaded) {
      __ lwz(Rflags, method_(access_flags));
    }

#ifdef ASSERT
    // Check if method needs synchronization.
    {
      Label Lok;
      __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
      __ btrue(CCR0, Lok);
      __ stop("method doesn't need synchronization");
      __ bind(Lok);
    }
#endif // ASSERT
  }

  // Get synchronization object to Rscratch2.
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label Lstatic;
    Label Ldone;

    __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
    __ btrue(CCR0, Lstatic);

    // Non-static case: load receiver obj from stack and we're done.
    __ ld(Robj_to_lock, R18_locals);
    __ b(Ldone);

    __ bind(Lstatic); // Static case: Lock the java mirror.
    __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
    __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
    __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
    __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);

    __ bind(Ldone);
    __ verify_oop(Robj_to_lock);
  }

  // Got the oop to lock => execute!
  __ add_monitor_to_stack(true, Rscratch1, R0);

  __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
  __ lock_object(R26_monitor, Robj_to_lock);
}
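
// Monitor slot sketch (explanatory, not from the original source): in
// lock_method above, add_monitor_to_stack carves one
// BasicObjectLock { BasicLock _lock; oop _obj; } out of the frame and leaves
// R26_monitor pointing at it; the std to obj_offset_in_bytes() fills in the
// oop, and lock_object performs the actual monitor enter on that slot.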

// Generate a fixed interpreter frame for pure interpreter
// and I2N native transition frames.
//
// Before (stack grows downwards):
//
//         |  ...         |
//         |--------------|
//         |  java arg0   |
//         |  ...         |
//         |  java argn   |
//         |              |  <-  R15_esp
//         |              |
//         |--------------|
//         |  abi_112     |
//         |              |  <-  R1_SP
//         |==============|
//
//
// After:
//
//         |  ...         |
//         |  java arg0   |  <-  R18_locals
//         |  ...         |
//         |  java argn   |
//         |--------------|
//         |              |
//         |  java locals |
//         |              |
//         |--------------|
//         |  abi_48      |
//         |==============|
//         |              |
//         |  istate      |
//         |              |
//         |--------------|
//         |  monitor     |  <-  R26_monitor
//         |--------------|
//         |              |  <-  R15_esp
//         |  expression  |
//         |  stack       |
//         |              |
//         |--------------|
//         |              |
//         |  abi_112     |  <-  R1_SP
//         |==============|
//
// The top most frame needs an abi space of 112 bytes. This space is needed,
// since we call to c. The c function may spill its arguments to the caller
// frame. When we call to java, we don't need these spill slots. In order to save
// space on the stack, we resize the caller frame. However, java locals reside in
// the caller frame and the frame has to be increased. The frame_size for the
// current frame was calculated based on max_stack as size for the expression
// stack. At the call, just a part of the expression stack might be used.
// We don't want to waste this space and cut the frame back accordingly.
// The resulting amount for resizing is calculated as follows:
// resize = (number_of_locals - number_of_arguments) * slot_size
//          + (R1_SP - R15_esp) + 48
//
// The size for the callee frame is calculated:
// framesize = 112 + max_stack + monitor + state_size
//
// maxstack:   Max number of slots on the expression stack, loaded from the method.
// monitor:    We statically reserve room for one monitor object.
// state_size: We save the current state of the interpreter to this area.
//
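// Worked example for the resize formula (illustrative numbers, not from the
// original source): with 2 argument slots and 5 local slots, the caller frame
// must grow by (5 - 2) * 8 = 24 bytes for the extra locals, while the negative
// (R1_SP - R15_esp) term gives back the unused part of the caller's expression
// stack; the +48 retains the minimal ABI section. The result is then rounded
// to frame::alignment_in_bytes.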
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
  Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
           top_frame_size      = R7_ARG5,
           Rconst_method       = R8_ARG6;

  assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);

  __ ld(Rconst_method, method_(const));
  __ lhz(Rsize_of_parameters /* number of params */,
         in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
  if (native_call) {
    // If we're calling a native method, we reserve space for the worst-case signature
    // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
    // We add two slots to the parameter_count, one for the jni
    // environment and one for a possible native mirror.
    Label skip_native_calculate_max_stack;
    __ addi(top_frame_size, Rsize_of_parameters, 2);
    __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
    __ bge(CCR0, skip_native_calculate_max_stack);
    __ li(top_frame_size, Argument::n_register_parameters);
    __ bind(skip_native_calculate_max_stack);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
  } else {
    __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
    __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
    __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
    __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
    __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
    __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
    __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
    __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
  }

  // Compute top frame size.
  __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);

  // Cut back area between esp and max_stack.
  __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);

  __ round_to(top_frame_size, frame::alignment_in_bytes);
  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
  // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
  // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.

  {
    // --------------------------------------------------------------------------
    // Stack overflow check

    Label cont;
    __ add(R11_scratch1, parent_frame_resize, top_frame_size);
    generate_stack_overflow_check(R11_scratch1, R12_scratch2);
  }

  // Set up interpreter state registers.

  __ add(R18_locals, R15_esp, Rsize_of_parameters);
  __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
  __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);

  // Set method data pointer.
  if (ProfileInterpreter) {
    Label zero_continue;
    __ ld(R28_mdx, method_(method_data));
    __ cmpdi(CCR0, R28_mdx, 0);
    __ beq(CCR0, zero_continue);
    __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
    __ bind(zero_continue);
  }

  if (native_call) {
    __ li(R14_bcp, 0); // Must initialize.
  } else {
    __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
  }

  // Resize parent frame.
  __ mflr(R12_scratch2);
  __ neg(parent_frame_resize, parent_frame_resize);
  __ resize_frame(parent_frame_resize, R11_scratch1);
  __ std(R12_scratch2, _abi(lr), R1_SP);

  __ addi(R26_monitor, R1_SP, - frame::ijava_state_size);
  __ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);

  // Store values.
  // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
  // in InterpreterMacroAssembler::call_from_interpreter.
  __ std(R19_method, _ijava_state_neg(method), R1_SP);
  __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
  __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
  __ std(R18_locals, _ijava_state_neg(locals), R1_SP);

  // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
  // be found in the frame after save_interpreter_state is done. This is always true
  // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
  // because e.g. frame::interpreter_frame_bcp() will not access the correct value
  // (Enhanced Stack Trace).
  // The signal handler does not save the interpreter state into the frame.
  __ li(R0, 0);
#ifdef ASSERT
  // Fill remaining slots with constants.
  __ load_const_optimized(R11_scratch1, 0x5afe);
  __ load_const_optimized(R12_scratch2, 0xdead);
#endif
  // We have to initialize some frame slots for native calls (accessed by GC).
  if (native_call) {
    __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
    __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
    if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
  }
#ifdef ASSERT
  else {
    __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
    __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
  }
  __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
#endif
  __ subf(R12_scratch2, top_frame_size, R1_SP);
  __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
  __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);

  // Push top frame.
  __ push_frame(top_frame_size, R11_scratch1);
}

// End of helpers

// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.

inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
  if (!InlineIntrinsics) return false;

  return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
          (kind==Interpreter::java_lang_math_abs));
}
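
// Note (explanatory, not from the original source): sqrt is only treated as an
// interpreter intrinsic when the CPU provides the fsqrt instruction
// (VM_Version::has_fsqrt()), while abs maps to the always-available fabs and
// therefore needs no feature check.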

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!math_entry_available(kind)) {
    NOT_PRODUCT(__ should_not_reach_here();)
    return NULL;
  }

  address entry = __ pc();

  __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);

  // Pop c2i arguments (if any) off when we return.
#ifdef ASSERT
  __ ld(R9_ARG7, 0, R1_SP);
  __ ld(R10_ARG8, 0, R21_sender_SP);
  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
  __ asm_assert_eq("backlink", 0x545);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

  if (kind == Interpreter::java_lang_math_sqrt) {
    __ fsqrt(F1_RET, F1_RET);
  } else if (kind == Interpreter::java_lang_math_abs) {
    __ fabs(F1_RET, F1_RET);
  } else {
    ShouldNotReachHere();
  }

  // And we're done.
  __ blr();

  __ flush();

  return entry;
}
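
// Usage sketch (illustrative, not from the original source): for a call like
// Math.sqrt(x), the lfd above fetches the double argument from the caller's
// expression stack via R15_esp, computes fsqrt directly into F1_RET, and
// returns without ever building an interpreter frame.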

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
//
// On entry:
//   R19_method - method
//   R16_thread - JavaThread*
//   R15_esp    - intptr_t* sender tos
//
//   abstract stack (grows up)
//     [ IJava (caller of JNI callee) ] <-- ASP
//     ...
address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

  address entry = __ pc();

  const bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // -----------------------------------------------------------------------------
  // Allocate a new frame that represents the native callee (i2n frame).
  // This is not a full-blown interpreter frame, but in particular, the
  // following registers are valid after this:
  // - R19_method
  // - R18_locals (points to start of arguments to native function)
  //
  //   abstract stack (grows up)
  //     [ IJava (caller of JNI callee) ] <-- ASP
  //     ...

  const Register signature_handler_fd = R11_scratch1;
  const Register pending_exception    = R0;
  const Register result_handler_addr  = R31;
  const Register native_method_fd     = R11_scratch1;
  const Register access_flags         = R22_tmp2;
  const Register active_handles       = R11_scratch1; // R26_monitor saved to state.
  const Register sync_state           = R12_scratch2;
  const Register sync_state_addr      = sync_state;   // Address is dead after use.
  const Register suspend_flags        = R11_scratch1;

  //=============================================================================
  // Allocate new frame and initialize interpreter state.

  Label exception_return;
  Label exception_return_sync_check;
  Label stack_overflow_return;

  // Generate new interpreter state and jump to stack_overflow_return in case of
  // a stack overflow.
  //generate_compute_interpreter_state(stack_overflow_return);

  Register size_of_parameters = R22_tmp2;

  generate_fixed_frame(true, size_of_parameters, noreg /* unused */);

  //=============================================================================
  // Increment invocation counter. On overflow, entry to JNI method
  // will be compiled.
  Label invocation_counter_overflow, continue_after_compile;
  if (inc_counter) {
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of synchronized methods which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // runtime, exception handling i.e. unlock_if_synchronized_method will
      // check this thread local flag.
      // This flag has two effects, one is to force an unwind in the topmost
      // interpreter frame and not perform an unlock while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

    BIND(continue_after_compile);
    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // access_flags = method->access_flags();
  // Load access flags.
  assert(access_flags->is_nonvolatile(),
         "access_flags must be in a non-volatile register");
  // Type check.
  assert(4 == sizeof(AccessFlags), "unexpected field size");
  __ lwz(access_flags, method_(access_flags));

  // We don't want to reload R19_method and access_flags after calls
  // to some helper functions.
  assert(R19_method->is_nonvolatile(),
         "R19_method must be a non-volatile register");

  // Check for synchronized methods. Must happen AFTER invocation counter
  // check, so method is not locked if counter overflows.

  if (synchronized) {
    lock_method(access_flags, R11_scratch1, R12_scratch2, true);

    // Update monitor in state.
    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
  }

  // jvmti/jvmpi support
  __ notify_method_entry();

  //=============================================================================
  // Get and call the signature handler.

  __ ld(signature_handler_fd, method_(signature_handler));
  Label call_signature_handler;

  __ cmpdi(CCR0, signature_handler_fd, 0);
  __ bne(CCR0, call_signature_handler);

  // Method has never been called. Either generate a specialized
  // handler or point to the slow one.
  //
  // Pass parameter 'false' to avoid exception check in call_VM.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);

  // Check for an exception while looking up the target method. If we
  // incurred one, bail.
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CCR0, pending_exception, 0);
  __ bne(CCR0, exception_return_sync_check); // Has pending exception.

  // Reload signature handler, it may have been created/assigned in the meanwhile.
  __ ld(signature_handler_fd, method_(signature_handler));
  __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).

  BIND(call_signature_handler);

  // Before we call the signature handler we push a new frame to
  // protect the interpreter frame volatile registers when we return
  // from jni but before we can get back to Java.

  // First set the frame anchor while the SP/FP registers are
  // convenient and the slow signature handler can use this same frame
  // anchor.

  // We have a TOP_IJAVA_FRAME here, which belongs to us.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);

  // Now the interpreter frame (and its call chain) have been
  // invalidated and flushed. We are now protected against eager
  // being enabled in native code. Even if it goes eager the
  // registers will be reloaded as clean and we will invalidate after
  // the call so no spurious flush should be possible.

  // Call signature handler and pass locals address.
  //
  // Our signature handlers copy required arguments to the C stack
  // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
  __ mr(R3_ARG1, R18_locals);
#if !defined(ABI_ELFv2)
  __ ld(signature_handler_fd, 0, signature_handler_fd);
#endif

  __ call_stub(signature_handler_fd);

  // Remove the register parameter varargs slots we allocated in
  // compute_interpreter_state. SP+16 ends up pointing to the ABI
  // outgoing argument area.
  //
  // Not needed on PPC64.
  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);

  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
  // Save across call to native method.
  __ mr(result_handler_addr, R3_RET);

  __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.

  // Set up fixed parameters and call the native method.
  // If the method is static, get mirror into R4_ARG2.
  {
    Label method_is_not_static;
    // access_flags is non-volatile and still valid, no need to restore it.
    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
    __ bfalse(CCR0, method_is_not_static);

    // constants = method->constants();
    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
    __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
    // pool_holder = method->constants()->pool_holder();
    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
          R11_scratch1/*constants*/);

    const int mirror_offset = in_bytes(Klass::java_mirror_offset());

    // mirror = pool_holder->klass_part()->java_mirror();
    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
    // state->_native_mirror = mirror;

    __ ld(R11_scratch1, 0, R1_SP);
    __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
    // R4_ARG2 = &state->_oop_temp;
    __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
    BIND(method_is_not_static);
  }

  // At this point, arguments have been copied off the stack into
  // their JNI positions. Oops are boxed in-place on the stack, with
  // handles copied to arguments. The result handler address is in a
  // register.

  // Pass JNIEnv address as first parameter.
  __ addir(R3_ARG1, thread_(jni_environment));

  // Load the native_method entry before we change the thread state.
  __ ld(native_method_fd, method_(native_function));

  //=============================================================================
  // Transition from _thread_in_Java to _thread_in_native. As soon as
  // we make this change the safepoint code needs to be certain that
  // the last Java frame we established is good. The pc in that frame
  // just needs to be near here not an actual return address.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0, _thread_in_native);
  __ release();

  // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));

  if (UseMembar) {
    __ fence();
  }

  //=============================================================================
  // Call the native method. Argument registers must not have been
  // overwritten since "__ call_stub(signature_handler);" (except for
  // ARG1 and ARG2 for static methods).
  __ call_c(native_method_fd);

  __ li(R0, 0);
  __ ld(R11_scratch1, 0, R1_SP);
  __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset

  // Note: C++ interpreter needs the following here:
  // The frame_manager_lr field, which we use for setting the last
  // java frame, gets overwritten by the signature handler. Restore
  // it now.
  //__ get_PC_trash_LR(R11_scratch1);
  //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);

  // Because of GC R19_method may no longer be valid.

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after
  // blocking.

  //=============================================================================
  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
  // in _thread_in_native state, loads _not_synchronized and is
  // preempted. VM thread changes sync state to synchronizing and
  // suspends threads for GC. Thread A is resumed to finish this
  // native method, but doesn't block here since it didn't see any
  // synchronization in progress, and escapes.

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_native_trans);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  } else {
    // Write serialization page so that the VM thread can do a pseudo remote
    // membar. We use the current thread pointer to calculate a thread
    // specific offset to write to within the page. This minimizes bus
    // traffic due to cache line collision.
    __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
  }

  // Now before we return to java we must look for a current safepoint
  // (a new safepoint can not start since we entered native_trans).
  // We must check here because a current safepoint could be modifying
  // the callers registers right this moment.

  // Acquire isn't strictly necessary here because of the fence, but
  // sync_state is declared to be volatile, so we do it anyway
  // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
  int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);

  // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
  __ lwz(sync_state, sync_state_offs, sync_state_addr);

  // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
  __ lwz(suspend_flags, thread_(suspend_flags));

  Label sync_check_done;
  Label do_safepoint;
  // No synchronization in progress nor yet synchronized.
  __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
  // Not suspended.
  __ cmpwi(CCR1, suspend_flags, 0);

  __ bne(CCR0, do_safepoint);
  __ beq(CCR1, sync_check_done);
  __ bind(do_safepoint);
  __ isync();
  // Block. We do the call directly and leave the current
  // last_Java_frame setup undisturbed. We must save any possible
  // native result across the call. No oop is present.

  __ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
            relocInfo::none);
#else
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
            relocInfo::none);
#endif

  __ bind(sync_check_done);

  //=============================================================================
  // <<<<<< Back in Interpreter Frame >>>>>

  // We are in thread_in_native_trans here and back in the normal
  // interpreter frame. We don't have to do anything special about
  // safepoints and we can switch to Java mode anytime we are ready.

  // Note: frame::interpreter_frame_result has a dependency on how the
  // method result is saved across the call to post_method_exit. For
  // native methods it assumes that the non-FPU/non-void result is
  // saved in _native_lresult and a FPU result in _native_fresult. If
  // this changes then the interpreter_frame_result implementation
  // will need to be updated too.

  // On PPC64, we have stored the result directly after the native call.

  //=============================================================================
  // Back in Java

  // We use release_store_fence to update values like the thread state, where
  // we don't want the current thread to continue until all our prior memory
  // accesses (including the new thread state) are visible to other threads.
  __ li(R0/*thread_state*/, _thread_in_Java);
  __ release();
  __ stw(R0/*thread_state*/, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }

  __ reset_last_Java_frame();

  // Jvmdi/jvmpi support. Whether we've got an exception pending or
  // not, and whether unlocking throws an exception or not, we notify
  // on native method exit. If we do have an exception, we'll end up
  // in the caller's context to handle it, so if we don't do the
  // notify here, we'll drop it on the floor.
  __ notify_method_exit(true/*native method*/,
                        ilgl /*illegal state (not used for native methods)*/,
                        InterpreterMacroAssembler::NotifyJVMTI,
                        false /*check_exceptions*/);

  //=============================================================================
  // Handle exceptions

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }

  // Reset active handles after returning from native.
  // thread->active_handles()->clear();
  __ ld(active_handles, thread_(active_handles));
  // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
  __ li(R0, 0);
  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);

  Label exception_return_sync_check_already_unlocked;
  __ ld(R0/*pending_exception*/, thread_(pending_exception));
  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
  __ bne(CCR0, exception_return_sync_check_already_unlocked);

  //-----------------------------------------------------------------------------
  // No exception pending.

  // Move native method result back into proper registers and return.
  // Invoke result handler (may unbox/promote).
  __ ld(R11_scratch1, 0, R1_SP);
  __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
  __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
  __ call_stub(result_handler_addr);

  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

  // Must use the return pc which was loaded from the caller's frame
  // as the VM uses return-pc-patching for deoptimization.
  __ mtlr(R0);
  __ blr();

  //-----------------------------------------------------------------------------
  // An exception is pending. We call into the runtime only if the
  // caller was not interpreted. If it was interpreted the
  // interpreter will do the correct thing. If it isn't interpreted
  // (call stub/compiled code) we will change our return and continue.

  BIND(exception_return_sync_check);

  if (synchronized) {
    // Don't check for exceptions since we're still in the i2n frame. Do that
    // manually afterwards.
    unlock_method(false);
  }
  BIND(exception_return_sync_check_already_unlocked);

  const Register return_pc = R31;

  __ ld(return_pc, 0, R1_SP);
  __ ld(return_pc, _abi(lr), return_pc);

  // Get the address of the exception handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                  R16_thread,
                  return_pc /* return pc */);
  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);

  // Load the PC of the exception handler into LR.
  __ mtlr(R3_RET);

  // Load exception into R3_ARG1 and clear pending exception in thread.
  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
  __ li(R4_ARG2, 0);
  __ std(R4_ARG2, thread_(pending_exception));

  // Load the original return pc into R4_ARG2.
  __ mr(R4_ARG2/*issuing_pc*/, return_pc);

  // Return to exception handler.
  __ blr();

  //=============================================================================
  // Counter overflow.

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);

    generate_counter_overflow(continue_after_compile);
  }

  return entry;
}
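
// Thread-state recap for the native entry above (explanatory summary derived
// from the generated code): the stub transitions _thread_in_Java ->
// _thread_in_native before calling out, _thread_in_native ->
// _thread_in_native_trans after the native call returns (with a safepoint and
// suspend check while in the transition state), and finally back to
// _thread_in_Java before the result handler and frame teardown run.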

// Generic interpreted method entry to (asm) interpreter.
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
  address entry = __ pc();
  // Generate the code to allocate the interpreter stack frame.
  Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
           Rsize_of_locals     = R5_ARG3; // Written by generate_fixed_frame.

  generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);

  // --------------------------------------------------------------------------
  // Zero out non-parameter locals.
  // Note: *Always* zero out non-parameter locals as Sparc does. It's not
  // worth asking the flag, just do it.
  Register Rslot_addr = R6_ARG4,
           Rnum       = R7_ARG5;
  Label Lno_locals, Lzero_loop;

  // Set up the zeroing loop.
  __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
  __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
  __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
  __ beq(CCR0, Lno_locals);
  __ li(R0, 0);
  __ mtctr(Rnum);

  // The zero locals loop.
  __ bind(Lzero_loop);
  __ std(R0, 0, Rslot_addr);
  __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
  __ bdnz(Lzero_loop);

  __ bind(Lno_locals);

  // --------------------------------------------------------------------------
  // Counter increment and overflow check.
  Label invocation_counter_overflow,
        profile_method,
        profile_method_continue;
  if (inc_counter || ProfileInterpreter) {

    Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
    if (synchronized) {
      // Since at this point in the method invocation the exception handler
      // would try to exit the monitor of synchronized methods which hasn't
      // been entered yet, we set the thread local variable
      // _do_not_unlock_if_synchronized to true. If any exception was thrown by
      // runtime, exception handling i.e. unlock_if_synchronized_method will
      // check this thread local flag.
      // This flag has two effects, one is to force an unwind in the topmost
      // interpreter frame and not perform an unlock while doing so.
      __ li(R0, 1);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }

    // Argument and return type profiling.
    __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);

    // Increment invocation counter and check for overflow.
    if (inc_counter) {
      generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    }

    __ bind(profile_method_continue);

    // Reset the _do_not_unlock_if_synchronized flag.
    if (synchronized) {
      __ li(R0, 0);
      __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    }
  }

  // --------------------------------------------------------------------------
  // Locking of synchronized methods. Must happen AFTER invocation_counter
  // check and stack overflow check, so method is not locked if overflows.
  if (synchronized) {
    lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
  }
#ifdef ASSERT
  else {
    Label Lok;
    __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
    __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
    __ asm_assert_eq("method needs synchronization", 0x8521);
    __ bind(Lok);
  }
#endif // ASSERT

  __ verify_thread();

  // --------------------------------------------------------------------------
  // JVMTI support
  __ notify_method_entry();

  // --------------------------------------------------------------------------
  // Start executing instructions.
  __ dispatch_next(vtos);

  // --------------------------------------------------------------------------
  // Out of line counter overflow and MDO creation code.
  if (ProfileInterpreter) {
    // We have decided to profile this method in the interpreter.
    __ bind(profile_method);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
    __ set_method_data_pointer_for_bcp();
    __ b(profile_method_continue);
  }

  if (inc_counter) {
    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(profile_method_continue);
  }
  return entry;
}
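
// Note (explanatory sketch, not from the original source): the shared
// interpreter setup code is expected to install this entry for both locking
// variants, roughly as
//   case Interpreter::zerolocals             : entry = generate_normal_entry(false); break;
//   case Interpreter::zerolocals_synchronized: entry = generate_normal_entry(true);  break;
// so the 'synchronized' choice is baked into the generated stub instead of
// being tested on every invocation.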
1234 |
|
1235 // CRC32 Intrinsics. |
|
1236 // |
|
1237 // Contract on scratch and work registers. |
|
1238 // ======================================= |
|
1239 // |
|
1240 // On ppc, the register set {R2..R12} is available in the interpreter as scratch/work registers. |
|
1241 // You should, however, keep in mind that {R3_ARG1..R10_ARG8} is the C-ABI argument register set. |
|
1242 // You can't rely on these registers across calls. |
|
1243 // |
|
1244 // The generators for CRC32_update and for CRC32_updateBytes use the |
|
1245 // scratch/work register set internally, passing the work registers |
|
1246 // as arguments to the MacroAssembler emitters as required. |
|
1247 // |
|
1248 // R3_ARG1..R6_ARG4 are preset to hold the incoming java arguments. |
|
1249 // Their contents is not constant but may change according to the requirements |
|
1250 // of the emitted code. |
|
1251 // |
|
1252 // All other registers from the scratch/work register set are used "internally" |
|
1253 // and contain garbage (i.e. unpredictable values) once blr() is reached. |
|
1254 // Basically, only R3_RET contains a defined value which is the function result. |
|
1255 // |
|
1256 /** |
|
1257 * Method entry for static native methods: |
|
1258 * int java.util.zip.CRC32.update(int crc, int b) |
|
1259 */ |
|
1260 address InterpreterGenerator::generate_CRC32_update_entry() { |
|
1261 if (UseCRC32Intrinsics) { |
|
1262 address start = __ pc(); // Remember stub start address (is rtn value). |
|
1263 Label slow_path; |
|
1264 |
|
1265 // Safepoint check |
|
1266 const Register sync_state = R11_scratch1; |
|
1267 int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true); |
|
1268 __ lwz(sync_state, sync_state_offs, sync_state); |
|
1269 __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized); |
|
1270 __ bne(CCR0, slow_path); |
|
    // We don't generate a local frame and don't align the stack because
    // we don't even call stub code (we generate the code inline)
    // and there is no safepoint on this path.

    // Load java parameters.
    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
    const Register argP    = R15_esp;
    const Register crc     = R3_ARG1;  // crc value
    const Register data    = R4_ARG2;  // address of java byte value (kernel_crc32 needs address)
    const Register dataLen = R5_ARG3;  // source data len (1 byte). Not used because calling the single-byte emitter.
    const Register table   = R6_ARG4;  // address of crc32 table
    const Register tmp     = dataLen;  // Reuse unused len register to show we don't actually need a separate tmp here.

    BLOCK_COMMENT("CRC32_update {");

    // Arguments are reversed on java expression stack
#ifdef VM_LITTLE_ENDIAN
    __ addi(data, argP, 0+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
                                       // Being passed as an int, the single byte is at offset +0.
#else
    __ addi(data, argP, 3+1*wordSize); // (stack) address of byte value. Emitter expects address, not value.
                                       // Being passed from java as an int, the single byte is at offset +3.
#endif
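    // Example (illustrative): an int holding the byte value 0xAB occupies four
    // bytes in the stack slot. In little-endian byte order they read AB 00 00 00
    // (LSB at offset +0); in big-endian order 00 00 00 AB (LSB at offset +3),
    // which is exactly the offset difference encoded above.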
|
    __ lwz(crc, 2*wordSize, argP);     // Current crc state, zero extend to 64 bit to have a clean register.

    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
    __ kernel_crc32_singleByte(crc, data, dataLen, table, tmp);

    // Restore caller sp for c2i case and return.
    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
    __ blr();

    // Generate a vanilla native entry as the slow path.
    BLOCK_COMMENT("} CRC32_update");
    BIND(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
    return start;
  }

  return NULL;
}
|
// CRC32 Intrinsics.
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(     int crc, byte[] b,  int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address start = __ pc();  // Remember stub start address (is rtn value).
    Label slow_path;

    // Safepoint check
    const Register sync_state = R11_scratch1;
    int sync_state_offs = __ load_const_optimized(sync_state, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
    __ lwz(sync_state, sync_state_offs, sync_state);
    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
    __ bne(CCR0, slow_path);

    // We don't generate a local frame and don't align the stack because
    // we don't even call stub code (we generate the code inline)
    // and there is no safepoint on this path.

    // Load parameters.
    // R15_esp is the caller's operand stack pointer, i.e. it points to the parameters.
    const Register argP    = R15_esp;
    const Register crc     = R3_ARG1;  // crc value
    const Register data    = R4_ARG2;  // address of java byte array
    const Register dataLen = R5_ARG3;  // source data len
    const Register table   = R6_ARG4;  // address of crc32 table

    const Register t0      = R9;       // scratch registers for crc calculation
    const Register t1      = R10;
    const Register t2      = R11;
    const Register t3      = R12;

    const Register tc0     = R2;       // registers to hold pre-calculated column addresses
    const Register tc1     = R7;
    const Register tc2     = R8;
    const Register tc3     = table;    // table address is reconstructed at the end of kernel_crc32_* emitters

    const Register tmp     = t0;       // Only used very locally to calculate byte buffer address.

    // Arguments are reversed on java expression stack.
    // Calculate address of start element.
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer direct".
      BLOCK_COMMENT("CRC32_updateByteBuffer {");
      // crc     @ (SP + 5W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to long array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off
      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
      __ lwz( crc,     5*wordSize, argP);  // current crc state
      __ add( data, data, tmp);            // Add byte buffer offset.
    } else {                                                         // Used for "updateBytes update".
      BLOCK_COMMENT("CRC32_updateBytes {");
      // crc     @ (SP + 4W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to byte array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off + base_offset
      __ ld(  data,    3*wordSize, argP);  // start of byte buffer
      __ lwa( tmp,     2*wordSize, argP);  // byte buffer offset
      __ lwa( dataLen, 1*wordSize, argP);  // #bytes to process
      __ add( data, data, tmp);            // add byte buffer offset
      __ lwz( crc,     4*wordSize, argP);  // current crc state
      __ addi(data, data, arrayOopDesc::base_offset_in_bytes(T_BYTE));
    }

    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);

    // Performance measurements show the 1word and 2word variants to be almost equivalent,
    // with very slight advantages for the 1word variant. We chose the 1word variant for
    // code compactness.
    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, tc3);

    // Restore caller sp for c2i case and return.
    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
    __ blr();

    // Generate a vanilla native entry as the slow path.
    BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
    BIND(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
    return start;
  }

  return NULL;
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  return !TemplateInterpreter::math_entry_available(method_kind(m));
}

// How much stack a method activation needs in stack slots.
// We must calc this exactly like in generate_fixed_frame.
// Note: This returns the conservative size assuming maximum alignment.
  if (!is_bottom_frame) {
    interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
  }
}

// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.
bool TemplateInterpreter::math_entry_available(AbstractInterpreter::MethodKind kind) {
  if (!InlineIntrinsics) return false;

  return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
          (kind==Interpreter::java_lang_math_abs));
}

// =============================================================================
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  Register Rexception    = R17_tos,
           Rcontinuation = R3_RET;

  // --------------------------------------------------------------------------
  // Entry point if a method returns with a pending exception (rethrow).
  Interpreter::_rethrow_exception_entry = __ pc();
|
  {
    __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);

    // Compiled code destroys templateTableBase, reload.
    __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
  }

  // Entry point if an interpreted method throws an exception (throw).
  Interpreter::_throw_exception_entry = __ pc();
  {
    __ mr(Rexception, R3_RET);

    __ verify_thread();
    __ verify_oop(Rexception);

    // Expression stack must be empty before entering the VM in case of an exception.
    __ empty_expression_stack();
    // Find the exception handler address and preserve the exception oop.
    // Call a C routine to find the handler and jump to it.
    __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
    __ mtctr(Rcontinuation);
    // Push exception for exception handler bytecodes.
    __ push_ptr(Rexception);

    // Jump to the exception handler (may be the remove-activation entry!).
    __ bctr();
  }
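  // Control-flow sketch of the throw path above (illustrative C-like pseudocode,
  // not HotSpot API):
  //
  //   handler = InterpreterRuntime::exception_handler_for_exception(thread, exception);
  //   push(exception);   // handler bytecodes expect the exception on the stack
  //   goto handler;      // bctr through CTR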
|
  // If the exception is not handled in the current frame, the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bcp still refers to the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // bcp: exception bcp
|
  // --------------------------------------------------------------------------
  // JVMTI PopFrame support

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  {
    // Set the popframe_processing bit in popframe_condition indicating that we are
    // currently handling popframe, so that call_VMs that may happen later do not
    // trigger new popframe handling cycles.
    __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
    __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Empty the expression stack, as in normal exception handling.
    __ empty_expression_stack();
    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label Lcaller_not_deoptimized;
    Register return_pc = R3_ARG1;
    __ ld(return_pc, 0, R1_SP);
    __ ld(return_pc, _abi(lr), return_pc);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
    __ cmpdi(CCR0, R3_RET, 0);
    __ bne(CCR0, Lcaller_not_deoptimized);

    // The deoptimized case.
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
    __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
    __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
    __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
    __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
    // Save these arguments.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);
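    // The address arithmetic above, as a C sketch (illustrative; names are
    // not HotSpot API):
    //
    //   size_t bytes = num_params << Interpreter::logStackElementSize;
    //   void*  start = locals + Interpreter::stackElementSize - bytes;  // first argument slot
    //   Deoptimization::popframe_preserve_args(thread, bytes, start);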
|
    // Inform deoptimization that it is responsible for restoring these arguments.
    __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
    __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Return from the current method into the deoptimization blob. We will eventually
    // end up in the deopt interpreter entry; deoptimization has prepared everything
    // such that we will re-execute the call that called us.
    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
    __ mtlr(return_pc);
    __ blr();
|
    // The non-deoptimized case.
    __ bind(Lcaller_not_deoptimized);

    // Clear the popframe condition flag.
    __ li(R0, 0);
    __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Get out of the current method and re-execute the call that called us.
    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
    __ restore_interpreter_state(R11_scratch1);
    __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
    __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
    if (ProfileInterpreter) {
      __ set_method_data_pointer_for_bcp();
      __ ld(R11_scratch1, 0, R1_SP);
      __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
    }
#if INCLUDE_JVMTI
    Label L_done;

    __ lbz(R11_scratch1, 0, R14_bcp);
    __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
    __ bne(CCR0, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
    __ ld(R4_ARG2, 0, R18_locals);
    __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
    __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
    __ cmpdi(CCR0, R4_ARG2, 0);
    __ beq(CCR0, L_done);
    __ std(R4_ARG2, wordSize, R15_esp);
    __ bind(L_done);
#endif // INCLUDE_JVMTI
    __ dispatch_next(vtos);
  }
  // end of JVMTI PopFrame support

  // --------------------------------------------------------------------------
  // Remove activation exception entry.
  // This is jumped to if an interpreted method can't handle an exception itself
  // (we come from the throw/rethrow exception entry above). We're going to call
  // into the VM to find the exception handler in the caller, pop the current
  // frame and return the handler we calculated.
  Interpreter::_remove_activation_entry = __ pc();
  {
    __ pop_ptr(Rexception);
    __ verify_thread();
    __ verify_oop(Rexception);
    __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);

    __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true);
    __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);

    __ get_vm_result(Rexception);

    // We are done with this activation frame; find out where to go next.
    // The continuation point will be an exception handler, which expects
    // the following registers set up:
    //
    // RET:  exception oop
    // ARG2: Issuing PC (see generate_exception_blob()), only used if the caller is compiled.

    Register return_pc = R31; // Needs to survive the runtime call.
    __ ld(return_pc, 0, R1_SP);
    __ ld(return_pc, _abi(lr), return_pc);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);

    // Remove the current activation.
    __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);

    __ mr(R4_ARG2, return_pc);
    __ mtlr(R3_RET);
    __ mr(R3_RET, Rexception);
    __ blr();
  }
|
}

// JVMTI ForceEarlyReturn support.
// Returns "in the middle" of a method with a "fake" return value.
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {

  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2;
|
  address entry = __ pc();
  __ empty_expression_stack();

  __ load_earlyret_value(state, Rscratch1);

  __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
  // Clear the earlyret state.
  __ li(R0, 0);
  __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);

  __ remove_activation(state, false, false);
  // Copied from TemplateTable::_return.
  // Restoration of lr done by remove_activation.
  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R3_RET, R17_tos); break;
    case ftos:
    case dtos: __ fmr(F1_RET, F15_ftos); break;
    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
               // to become visible before the reference to the object gets stored anywhere.
               __ membar(Assembler::StoreStore); break;
    default  : ShouldNotReachHere();
  }
  __ blr();

  return entry;
} // end of ForceEarlyReturn support
|
//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  __ align(32, 12, 24); // align L
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}
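// Example (explanatory, not generated code): a vtos template reached with an
// int on top of stack enters at iep, which spills R17_tos to the expression
// stack via push_i() and falls through to the shared label L, so the template
// body only ever sees an empty tos cache.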
|
//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // Down here so it can be "virtual".
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  //__ flush_bundle();
  address entry = __ pc();

  const char *bname = NULL;
  uint tsize = 0;
|
  switch(state) {
    case ftos:
      bname = "trace_code_ftos {";
      tsize = 2;
      break;
    case btos:
      bname = "trace_code_btos {";
      tsize = 2;
      break;
    case ctos:
      bname = "trace_code_ctos {";
      tsize = 2;
      break;
    case stos:
      bname = "trace_code_stos {";
      tsize = 2;
      break;
    case itos:
      bname = "trace_code_itos {";
      tsize = 2;
      break;
    case ltos:
      bname = "trace_code_ltos {";
      tsize = 3;
      break;
    case atos:
      bname = "trace_code_atos {";
      tsize = 2;
      break;
    case vtos:
      // Note: In case of vtos, the topmost stack value could be an int or a double.
      // In case of a double (2 slots) we won't see the 2nd stack value.
      // Maybe we should simply print the topmost 3 stack slots to cope with the problem.
      bname = "trace_code_vtos {";
      tsize = 2;
      break;
    case dtos:
      bname = "trace_code_dtos {";
      tsize = 3;
      break;
    default:
      ShouldNotReachHere();
  }
|
  BLOCK_COMMENT(bname);

  // Support short-cut for TraceBytecodesAt.
  // Don't call into the VM if we don't want to trace; this speeds things up.
  Label Lskip_vm_call;
  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
    int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
    int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
    __ ld(R11_scratch1, offs1, R11_scratch1);
    __ lwa(R12_scratch2, offs2, R12_scratch2);
    __ cmpd(CCR0, R12_scratch2, R11_scratch1);
    __ blt(CCR0, Lskip_vm_call);
  }
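  // C sketch of the short-cut above (illustrative):
  //
  //   if (BytecodeCounter::_counter_value < TraceBytecodesAt)
  //     goto Lskip_vm_call;  // don't trace yet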
|
  __ push(state);
  // Load 2 topmost expression stack values.
  __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
  __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
  __ mflr(R31);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
  __ mtlr(R31);
  __ pop(state);

  if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
    __ bind(Lskip_vm_call);
  }
  __ blr();
  BLOCK_COMMENT("} trace_code");
  return entry;
}
|
void TemplateInterpreterGenerator::count_bytecode() {
  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
  __ lwz(R12_scratch2, offs, R11_scratch1);
  __ addi(R12_scratch2, R12_scratch2, 1);
  __ stw(R12_scratch2, offs, R11_scratch1);
}
|
void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
  __ lwz(R12_scratch2, offs, R11_scratch1);
  __ addi(R12_scratch2, R12_scratch2, 1);
  __ stw(R12_scratch2, offs, R11_scratch1);
}
|
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register addr = R11_scratch1,
                 tmp  = R12_scratch2;
  // Get index, shift out old bytecode, bring in new bytecode, and store it.
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);
  int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
  __ lwz(tmp, offs1, addr);
  __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
  __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ stw(tmp, offs1, addr);

  // Bump bucket contents.
  // _counters[_index] ++;
  int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
  __ sldi(tmp, tmp, LogBytesPerInt);
  __ add(addr, tmp, addr);
  __ lwz(tmp, offs2, addr);
  __ addi(tmp, tmp, 1);
  __ stw(tmp, offs2, addr);
}
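// Equivalent C sketch of the pair-histogram update above (illustrative;
// number_of_codes == 1 << log2_number_of_codes):
//
//   _index = (_index >> log2_number_of_codes)
//          | (current_bytecode << log2_number_of_codes);
//   _counters[_index]++;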
|
void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");

  // Note: we destroy LR here.
  __ bl(Interpreter::trace_code(t->tos_in()));
}
|
void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
  int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
  __ ld(R11_scratch1, offs1, R11_scratch1);
  __ lwa(R12_scratch2, offs2, R12_scratch2);
  __ cmpd(CCR0, R12_scratch2, R11_scratch1);
  __ bne(CCR0, L);
  __ illtrap();
  __ bind(L);
}
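// Equivalent C sketch (illustrative): trap exactly when the global bytecode
// counter reaches StopInterpreterAt:
//
//   if (BytecodeCounter::_counter_value == StopInterpreterAt)
//     __builtin_trap();  // the illtrap above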
|
#endif // !PRODUCT
#endif // !CC_INTERP
|