/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2013 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_ppc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")


// Used by generate_deopt_blob. Defined in .ad file.
extern uint size_deopt_handler();


class RegisterSaver {
 // Used for saving volatile registers.
 public:

  // Support different return pc locations.
  enum ReturnPCLocation {
    return_pc_is_lr,
    return_pc_is_r4,
    return_pc_is_thread_saved_exception_pc
  };

  static OopMap* push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
                                                           int* out_frame_size_in_bytes,
                                                           bool generate_oop_map,
                                                           int return_pc_adjustment,
                                                           ReturnPCLocation return_pc_location);
  static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                   int frame_size_in_bytes,
                                                   bool restore_ctr);

  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
                                                     Register r_temp,
                                                     int frame_size,
                                                     int total_args,
                                                     const VMRegPair *regs, const VMRegPair *regs2 = NULL);
  static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
                                                       int frame_size,
                                                       int total_args,
                                                       const VMRegPair *regs, const VMRegPair *regs2 = NULL);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);

  // Constants and data structures:

  typedef enum {
    int_reg     = 0,
    float_reg   = 1,
    special_reg = 2
  } RegisterType;

  typedef enum {
    reg_size      = 8,
    half_reg_size = reg_size / 2
  } RegisterConstants;

  typedef struct {
    RegisterType reg_type;
    int          reg_num;
    VMReg        vmreg;
  } LiveRegType;
};
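
// Illustrative use of the RegisterSaver helpers (an editor's sketch based on
// the declarations above, not a complete stub): a runtime blob typically
// brackets its VM work as
//
//   int frame_size_in_bytes;
//   OopMap* map = RegisterSaver::push_frame_abi112_and_save_live_registers(
//       masm, &frame_size_in_bytes, /*generate_oop_map=*/true,
//       /*return_pc_adjustment=*/0, RegisterSaver::return_pc_is_lr);
//   // ... call into the VM ...
//   RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes,
//                                                       /*restore_ctr=*/true);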


#define RegisterSaver_LiveSpecialReg(regname) \
  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveIntReg(regname) \
  { RegisterSaver::int_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveFloatReg(regname) \
  { RegisterSaver::float_reg, regname->encoding(), regname->as_VMReg() }

static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
  // Live registers which get spilled to the stack. Register
  // positions in this array correspond directly to the stack layout.

  //
  // live special registers:
  //
  RegisterSaver_LiveSpecialReg(SR_CTR),
  //
  // live float registers:
  //
  RegisterSaver_LiveFloatReg(F0),
  RegisterSaver_LiveFloatReg(F1),
  RegisterSaver_LiveFloatReg(F2),
  RegisterSaver_LiveFloatReg(F3),
  RegisterSaver_LiveFloatReg(F4),
  RegisterSaver_LiveFloatReg(F5),
  RegisterSaver_LiveFloatReg(F6),
  RegisterSaver_LiveFloatReg(F7),
  RegisterSaver_LiveFloatReg(F8),
  RegisterSaver_LiveFloatReg(F9),
  RegisterSaver_LiveFloatReg(F10),
  RegisterSaver_LiveFloatReg(F11),
  RegisterSaver_LiveFloatReg(F12),
  RegisterSaver_LiveFloatReg(F13),
  RegisterSaver_LiveFloatReg(F14),
  RegisterSaver_LiveFloatReg(F15),
  RegisterSaver_LiveFloatReg(F16),
  RegisterSaver_LiveFloatReg(F17),
  RegisterSaver_LiveFloatReg(F18),
  RegisterSaver_LiveFloatReg(F19),
  RegisterSaver_LiveFloatReg(F20),
  RegisterSaver_LiveFloatReg(F21),
  RegisterSaver_LiveFloatReg(F22),
  RegisterSaver_LiveFloatReg(F23),
  RegisterSaver_LiveFloatReg(F24),
  RegisterSaver_LiveFloatReg(F25),
  RegisterSaver_LiveFloatReg(F26),
  RegisterSaver_LiveFloatReg(F27),
  RegisterSaver_LiveFloatReg(F28),
  RegisterSaver_LiveFloatReg(F29),
  RegisterSaver_LiveFloatReg(F30),
  RegisterSaver_LiveFloatReg(F31),
  //
  // live integer registers:
  //
  RegisterSaver_LiveIntReg(R0),
  //RegisterSaver_LiveIntReg(R1), // stack pointer
  RegisterSaver_LiveIntReg(R2),
  RegisterSaver_LiveIntReg(R3),
  RegisterSaver_LiveIntReg(R4),
  RegisterSaver_LiveIntReg(R5),
  RegisterSaver_LiveIntReg(R6),
  RegisterSaver_LiveIntReg(R7),
  RegisterSaver_LiveIntReg(R8),
  RegisterSaver_LiveIntReg(R9),
  RegisterSaver_LiveIntReg(R10),
  RegisterSaver_LiveIntReg(R11),
  RegisterSaver_LiveIntReg(R12),
  //RegisterSaver_LiveIntReg(R13), // system thread id
  RegisterSaver_LiveIntReg(R14),
  RegisterSaver_LiveIntReg(R15),
  RegisterSaver_LiveIntReg(R16),
  RegisterSaver_LiveIntReg(R17),
  RegisterSaver_LiveIntReg(R18),
  RegisterSaver_LiveIntReg(R19),
  RegisterSaver_LiveIntReg(R20),
  RegisterSaver_LiveIntReg(R21),
  RegisterSaver_LiveIntReg(R22),
  RegisterSaver_LiveIntReg(R23),
  RegisterSaver_LiveIntReg(R24),
  RegisterSaver_LiveIntReg(R25),
  RegisterSaver_LiveIntReg(R26),
  RegisterSaver_LiveIntReg(R27),
  RegisterSaver_LiveIntReg(R28),
  RegisterSaver_LiveIntReg(R29),
  RegisterSaver_LiveIntReg(R31),
  RegisterSaver_LiveIntReg(R30), // r30 must be the last register
};
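
// Layout sketch (editor's illustration, derived from the array above): the
// save area holds one 8-byte slot per entry, in array order, at increasing
// offsets. With the current set of 1 special + 32 float + 30 integer
// registers this is 63 * 8 = 504 bytes, rounded up to
// frame::alignment_in_bytes by the frame-size computation below.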

OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
                                                                 int* out_frame_size_in_bytes,
                                                                 bool generate_oop_map,
                                                                 int return_pc_adjustment,
                                                                 ReturnPCLocation return_pc_location) {
  // Push an abi112-frame and store all registers which may be live.
  // If requested, create an OopMap: Record volatile registers as
  // callee-save values in an OopMap so their save locations will be
  // propagated to the RegisterMap of the caller frame during
  // StackFrameStream construction (needed for deoptimization; see
  // compiledVFrame::create_stack_value).
  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.

  int i;
  int offset;

  // Calculate frame size.
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int frame_size_in_bytes  = round_to(register_save_size, frame::alignment_in_bytes)
                                   + frame::abi_112_size;
  *out_frame_size_in_bytes       = frame_size_in_bytes;
  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;

  BLOCK_COMMENT("push_frame_abi112_and_save_live_registers {");

  // Save r30 in the last slot of the not yet pushed frame so that we
  // can use it as scratch reg.
  __ std(R30, -reg_size, R1_SP);
  assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
         "consistency check");

  // Save the flags.
  // Do the save_LR_CR by hand and adjust the return pc if requested.
  __ mfcr(R30);
  __ std(R30, _abi(cr), R1_SP);
  switch (return_pc_location) {
    case return_pc_is_lr: __ mflr(R30); break;
    case return_pc_is_r4: __ mr(R30, R4); break;
    case return_pc_is_thread_saved_exception_pc:
      __ ld(R30, thread_(saved_exception_pc)); break;
    default: ShouldNotReachHere();
  }
  if (return_pc_adjustment != 0)
    __ addi(R30, R30, return_pc_adjustment);
  __ std(R30, _abi(lr), R1_SP);

  // Push a new frame.
  __ push_frame(frame_size_in_bytes, R30);

  // Save all registers (ints and floats).
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 30) { // We spilled R30 right at the beginning.
          __ std(as_Register(reg_num), offset, R1_SP);
        }
        break;
      }
      case RegisterSaver::float_reg: {
        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          __ mfctr(R30);
          __ std(R30, offset, R1_SP);
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }

    if (generate_oop_map) {
      map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
                            RegisterSaver_LiveRegs[i].vmreg);
      map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
                            RegisterSaver_LiveRegs[i].vmreg->next());
    }
    offset += reg_size;
  }

  BLOCK_COMMENT("} push_frame_abi112_and_save_live_registers");

  // And we're done.
  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
                                                         int frame_size_in_bytes,
                                                         bool restore_ctr) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");

  // Restore all registers (ints and floats).
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;

    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (reg_num != 30) // R30 restored at the end, it's the tmp reg!
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
            __ ld(R30, offset, R1_SP);
            __ mtctr(R30);
          }
        } else {
          Unimplemented();
        }
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }

  // Pop the frame.
  __ pop_frame();

  // Restore the flags.
  __ restore_LR_CR(R30);

  // Restore scratch register's value.
  __ ld(R30, -reg_size, R1_SP);

  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
}

void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
                                                           int frame_size, int total_args, const VMRegPair *regs,
                                                           const VMRegPair *regs2) {
  __ push_frame(frame_size, r_temp);
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ std(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ stfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (!r_1->is_valid()) {
        assert(!r_2->is_valid(), "");
        continue;
      }
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
}

void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
                                                             int total_args, const VMRegPair *regs,
                                                             const VMRegPair *regs2) {
  int st_off = frame_size - wordSize;
  for (int i = 0; i < total_args; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      __ ld(r, st_off, R1_SP);
      st_off -= wordSize;
    } else if (r_1->is_FloatRegister()) {
      FloatRegister f = r_1->as_FloatRegister();
      __ lfd(f, st_off, R1_SP);
      st_off -= wordSize;
    }
  }
  if (regs2 != NULL) {
    for (int i = 0; i < total_args; i++) {
      VMReg r_1 = regs2[i].first();
      VMReg r_2 = regs2[i].second();
      if (r_1->is_Register()) {
        Register r = r_1->as_Register();
        __ ld(r, st_off, R1_SP);
        st_off -= wordSize;
      } else if (r_1->is_FloatRegister()) {
        FloatRegister f = r_1->as_FloatRegister();
        __ lfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }
  __ pop_frame();
}

// Restore the registers that might be holding a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
  int i;
  int offset;
  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
                                   sizeof(RegisterSaver::LiveRegType);
  const int register_save_size   = regstosave_num * reg_size;
  const int register_save_offset = frame_size_in_bytes - register_save_size;

  // Restore all result registers (ints and floats).
  offset = register_save_offset;
  for (int i = 0; i < regstosave_num; i++) {
    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
    switch (reg_type) {
      case RegisterSaver::int_reg: {
        if (as_Register(reg_num) == R3_RET) // int result_reg
          __ ld(as_Register(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::float_reg: {
        if (as_FloatRegister(reg_num) == F1_RET) // float result_reg
          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
        break;
      }
      case RegisterSaver::special_reg: {
        // Special registers don't hold a result.
        break;
      }
      default:
        ShouldNotReachHere();
    }
    offset += reg_size;
  }
}

// Is vector's size (in bytes) bigger than a size saved by default?
bool SharedRuntime::is_wide_vector(int size) {
  ResourceMark rm;
  // Note, MaxVectorSize == 8 on PPC64.
  assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
  return size > 8;
}
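
// Note (editor's illustration): with MaxVectorSize == 8 on PPC64 the assert
// above guarantees size <= 8, so this predicate currently always returns
// false; wide vectors simply do not occur on this platform.
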
#ifdef COMPILER2
static int reg2slot(VMReg r) {
  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
}

static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
#endif

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
// values up to RegisterImpl::number_of_registers are the 64-bit
// integer registers.

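// Example (editor's illustration of the slot numbering above): stack slot n
// corresponds to byte offset 4*n from SP, so VMRegImpl::stack2reg(0) names
// 0(sp), stack2reg(1) names 4(sp), and an 8-byte value placed with set2()
// occupies two adjacent slots, e.g. slots (2,3) at 8(sp).
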
// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.

const VMReg java_iarg_reg[8] = {
  R3->as_VMReg(),
  R4->as_VMReg(),
  R5->as_VMReg(),
  R6->as_VMReg(),
  R7->as_VMReg(),
  R8->as_VMReg(),
  R9->as_VMReg(),
  R10->as_VMReg()
};

const VMReg java_farg_reg[13] = {
  F1->as_VMReg(),
  F2->as_VMReg(),
  F3->as_VMReg(),
  F4->as_VMReg(),
  F5->as_VMReg(),
  F6->as_VMReg(),
  F7->as_VMReg(),
  F8->as_VMReg(),
  F9->as_VMReg(),
  F10->as_VMReg(),
  F11->as_VMReg(),
  F12->as_VMReg(),
  F13->as_VMReg()
};

const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);
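
// Example (editor's illustration of java_calling_convention below): for a
// signature (int, long, float, double) the arguments land in R3, R4, F1 and
// F2 -- int/long arguments consume java_iarg_reg entries and float/double
// arguments consume java_farg_reg entries independently of each other.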

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  // C2c calling conventions for compiled-compiled calls.
  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
  // registers _AND_ put the rest on the stack.

  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int i;
  VMReg reg;
  int stk = 0;
  int ireg = 0;
  int freg = 0;

  // We put the first 8 arguments into registers and the rest on the
  // stack. Float arguments are already in their argument registers
  // due to c2c calling conventions (see calling_convention).
  for (int i = 0; i < total_args_passed; ++i) {
    switch(sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (ireg < num_java_iarg_registers) {
          // Put int/ptr in register.
          reg = java_iarg_reg[ireg];
          ++ireg;
        } else {
          // Put int/ptr on stack.
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_intfloat;
        }
        regs[i].set1(reg);
        break;
      case T_LONG:
        assert(sig_bt[i+1] == T_VOID, "expecting half");
        if (ireg < num_java_iarg_registers) {
          // Put long in register.
          reg = java_iarg_reg[ireg];
          ++ireg;
        } else {
          // Put long on stack. They must be aligned to 2 slots.
          if (stk & 0x1) ++stk;
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_longdouble;
        }
        regs[i].set2(reg);
        break;
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
        if (ireg < num_java_iarg_registers) {
          // Put ptr in register.
          reg = java_iarg_reg[ireg];
          ++ireg;
        } else {
          // Put ptr on stack. Objects must be aligned to 2 slots too,
          // because "64-bit pointers record oop-ishness on 2 aligned
          // adjacent registers." (see OopFlow::build_oop_map).
          if (stk & 0x1) ++stk;
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_longdouble;
        }
        regs[i].set2(reg);
        break;
      case T_FLOAT:
        if (freg < num_java_farg_registers) {
          // Put float in register.
          reg = java_farg_reg[freg];
          ++freg;
        } else {
          // Put float on stack.
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_intfloat;
        }
        regs[i].set1(reg);
        break;
      case T_DOUBLE:
        assert(sig_bt[i+1] == T_VOID, "expecting half");
        if (freg < num_java_farg_registers) {
          // Put double in register.
          reg = java_farg_reg[freg];
          ++freg;
        } else {
          // Put double on stack. They must be aligned to 2 slots.
          if (stk & 0x1) ++stk;
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_longdouble;
        }
        regs[i].set2(reg);
        break;
      case T_VOID:
        // Do not count halves.
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
    }
  }
  return round_to(stk, 2);
}

#ifdef COMPILER2
// Calling convention for calling C code.
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  // Calling conventions for C runtime calls and calls to JNI native methods.
  //
  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
  // the first 13 flt/dbl's in the first 13 fp regs but additionally
  // copy flt/dbl to the stack if they are beyond the 8th argument.

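  // Example (editor's illustration): for a C signature (long, double, long)
  // the arguments land in R3, F1 and R5 -- the integer register
  // corresponding to an FP argument's position (here R4) is left undefined,
  // as described above, because iarg_reg is indexed by overall argument
  // position rather than by a separate integer-register counter.
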
  const VMReg iarg_reg[8] = {
    R3->as_VMReg(),
    R4->as_VMReg(),
    R5->as_VMReg(),
    R6->as_VMReg(),
    R7->as_VMReg(),
    R8->as_VMReg(),
    R9->as_VMReg(),
    R10->as_VMReg()
  };

  const VMReg farg_reg[13] = {
    F1->as_VMReg(),
    F2->as_VMReg(),
    F3->as_VMReg(),
    F4->as_VMReg(),
    F5->as_VMReg(),
    F6->as_VMReg(),
    F7->as_VMReg(),
    F8->as_VMReg(),
    F9->as_VMReg(),
    F10->as_VMReg(),
    F11->as_VMReg(),
    F12->as_VMReg(),
    F13->as_VMReg()
  };

  const int num_iarg_registers = sizeof(iarg_reg) / sizeof(iarg_reg[0]);
  const int num_farg_registers = sizeof(farg_reg) / sizeof(farg_reg[0]);

  // The first 8 arguments are not passed on the stack.
  const int num_args_in_regs = 8;
#define put_arg_in_reg(arg) ((arg) < num_args_in_regs)

  // Check calling conventions consistency.
  assert(num_iarg_registers == num_args_in_regs
         && num_iarg_registers == 8
         && num_farg_registers == 13,
         "consistency");

  // `Stk' counts stack slots. Due to alignment, 32 bit values occupy
  // 2 such slots, like 64 bit values do.
  const int inc_stk_for_intfloat   = 2; // 2 slots for ints and floats
  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles

  int ill_i = 0;
  int ill_t = 0;
  int i;
  VMReg reg;
  // Leave room for C-compatible ABI_112.
  int stk = (frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
  int arg = 0;
  int freg = 0;

  // Avoid passing C arguments in the wrong stack slots.
  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
         "passing C arguments in wrong stack slots");

  // We fill-out regs AND regs2 if an argument must be passed in a
  // register AND in a stack slot. If regs2 is NULL in such a
  // situation, we bail out with a fatal error.
  for (int i = 0; i < total_args_passed; ++i, ++arg) {
    // Initialize regs2 to BAD.
    if (regs2 != NULL) regs2[i].set_bad();

    switch(sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        // We must cast ints to longs and use full 64 bit stack slots
        // here. We do the cast in GraphKit::gen_stub() and just guard
        // here against losing that change.
        Unimplemented(); // TODO: PPC port
        /*
        assert(SharedRuntime::c_calling_convention_requires_ints_as_longs(),
               "argument of type int should be promoted to type long");
        */
        guarantee(i > 0 && sig_bt[i-1] == T_LONG,
                  "argument of type (bt) should have been promoted to type (T_LONG,bt) for bt in "
                  "{T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
        // Do not count halves.
        regs[i].set_bad();
        --arg;
        break;
      case T_LONG:
        guarantee(sig_bt[i+1] == T_VOID ||
                  sig_bt[i+1] == T_BOOLEAN || sig_bt[i+1] == T_CHAR ||
                  sig_bt[i+1] == T_BYTE   || sig_bt[i+1] == T_SHORT ||
                  sig_bt[i+1] == T_INT,
                  "expecting type (T_LONG,half) or type (T_LONG,bt) with bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        // Oops are already boxed if required (JNI).
        if (put_arg_in_reg(arg)) {
          reg = iarg_reg[arg];
        } else {
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_longdouble;
        }
        regs[i].set2(reg);
        break;
      case T_FLOAT:
        if (put_arg_in_reg(arg)) {
          reg = farg_reg[freg];
        } else {
          // Put float on stack.
#       if defined(LINUX)
          reg = VMRegImpl::stack2reg(stk+1);
#       elif defined(AIX)
          reg = VMRegImpl::stack2reg(stk);
#       else
#       error "unknown OS"
#       endif
          stk += inc_stk_for_intfloat;
        }

        if (freg < num_farg_registers) {
          // There are still some float argument registers left. Put the
          // float in a register if not already done.
          if (reg != farg_reg[freg]) {
            guarantee(regs2 != NULL, "must pass float in register and stack slot");
            VMReg reg2 = farg_reg[freg];
            regs2[i].set1(reg2);
          }
          ++freg;
        }

        regs[i].set1(reg);
        break;
      case T_DOUBLE:
        assert(sig_bt[i+1] == T_VOID, "expecting half");
        if (put_arg_in_reg(arg)) {
          reg = farg_reg[freg];
        } else {
          // Put double on stack.
          reg = VMRegImpl::stack2reg(stk);
          stk += inc_stk_for_longdouble;
        }

        if (freg < num_farg_registers) {
          // There are still some float argument registers left. Put the
          // float in a register if not already done.
          if (reg != farg_reg[freg]) {
            guarantee(regs2 != NULL, "must pass float in register and stack slot");
            VMReg reg2 = farg_reg[freg];
            regs2[i].set2(reg2);
          }
          ++freg;
        }

        regs[i].set2(reg);
        break;
      case T_VOID:
        // Do not count halves.
        regs[i].set_bad();
        --arg;
        break;
      default:
        ShouldNotReachHere();
    }
  }

  return round_to(stk, 2);
}
#endif // COMPILER2

static address gen_c2i_adapter(MacroAssembler *masm,
                               int total_args_passed,
                               int comp_args_on_stack,
                               const BasicType *sig_bt,
                               const VMRegPair *regs,
                               Label& call_interpreter,
                               const Register& ientry) {

  address c2i_entrypoint;

  const Register sender_SP = R21_sender_SP; // == R21_tmp1
  const Register code      = R22_tmp2;
  //const Register ientry  = R23_tmp3;
  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  const Register return_pc = R27_tmp7;
  const Register tmp       = R28_tmp8;

  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);

  // Adapter needs TOP_IJAVA_FRAME_ABI.
  const int adapter_size = frame::top_ijava_frame_abi_size +
                           round_to(total_args_passed * wordSize, frame::alignment_in_bytes);


  // Regular (verified) c2i entry point.
  c2i_entrypoint = __ pc();

  // Does compiled code exist? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq(CCR0, call_interpreter);


  // Patch caller's callsite, method_(code) was not NULL which means that
  // compiled code exists.
  __ mflr(return_pc);
  __ std(return_pc, _abi(lr), R1_SP);
  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);

  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
  __ ld(return_pc, _abi(lr), R1_SP);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ mtlr(return_pc);


  // Call the interpreter.
  __ BIND(call_interpreter);
  __ mtctr(ientry);

  // Get a copy of the current SP for loading caller's arguments.
  __ mr(sender_SP, R1_SP);

  // Add space for the adapter.
  __ resize_frame(-adapter_size, R12_scratch2);

  int st_off = adapter_size - wordSize;

  // Write the args into the outgoing interpreter space.
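  // Layout sketch (editor's illustration): st_off starts at the top of the
  // adapter frame and moves down one word per interpreter stack slot; longs
  // and doubles take two slots, with the unused first slot zapped to junk
  // in debug builds (see below).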
  for (int i = 0; i < total_args_passed; i++) {
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      Register tmp_reg = value_regs[value_regs_index];
      value_regs_index = (value_regs_index + 1) % num_value_regs;
      // The calling convention produces OptoRegs that ignore the out
      // preserve area (JIT's ABI). We must account for it here.
      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        __ lwz(tmp_reg, ld_off, sender_SP);
      } else {
        __ ld(tmp_reg, ld_off, sender_SP);
      }
      // Pretend stack targets were loaded into tmp_reg.
      r_1 = tmp_reg->as_VMReg();
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ stw(r, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // Longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
          st_off -= wordSize;
        }
        __ std(r, st_off, R1_SP);
        st_off -= wordSize;
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      FloatRegister f = r_1->as_FloatRegister();
      if (!r_2->is_valid()) {
        __ stfs(f, st_off, R1_SP);
        st_off -= wordSize;
      } else {
        // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        // One of these should get known junk...
        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
        st_off -= wordSize;
        __ stfd(f, st_off, R1_SP);
        st_off -= wordSize;
      }
    }
  }

  // Jump to the interpreter just as if interpreter was doing it.

  // Load TOS.
  __ addi(R17_tos, R1_SP, st_off);

  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
  __ bctr();

  return c2i_entrypoint;
}

static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Load method's entry-point from Method*.
  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // Note: the sender SP is passed in a register on entry. We must preserve
  // it since we may do an i2c -> c2i transition if we lose a race where
  // compiled code goes non-entrant while we get args ready.
  // In addition we must align the stack to 16 bytes on an i2c entry,
  // else we lose the alignment we expect in all compiled code, and
  // register save code can segv when it finds an improperly aligned
  // stack pointer.

  const Register ld_ptr = R17_tos;
  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
  int value_regs_index = 0;

  int ld_offset = total_args_passed*wordSize;

  // Cut-out for having no stack args. Since up to 8 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize.
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
  }

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the temporary value registers.
  BLOCK_COMMENT("Shuffle arguments");
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from ld_ptr.
    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_FloatRegister()) {
      if (!r_2->is_valid()) {
        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
        ld_offset -= wordSize;
      } else {
        // Skip the unused interpreter slot.
        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
        ld_offset -= 2*wordSize;
      }
    } else {
      Register r;
      if (r_1->is_stack()) {
        // Must do a memory to memory move thru "value".
        r = value_regs[value_regs_index];
        value_regs_index = (value_regs_index + 1) % num_value_regs;
      } else {
        r = r_1->as_Register();
      }
      if (!r_2->is_valid()) {
        // Not sure we need to do this but it shouldn't hurt.
        if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
          __ ld(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        } else {
          __ lwz(r, ld_offset, ld_ptr);
          ld_offset -= wordSize;
        }
      } else {
        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          ld_offset -= wordSize;
        }
        __ ld(r, ld_offset, ld_ptr);
        ld_offset -= wordSize;
      }

      if (r_1->is_stack()) {
        // Now store value where the compiler expects it.
        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;

        if (sig_bt[i] == T_INT   || sig_bt[i] == T_FLOAT || sig_bt[i] == T_BOOLEAN ||
            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR  || sig_bt[i] == T_BYTE) {
          __ stw(r, st_off, R1_SP);
        } else {
          __ std(r, st_off, R1_SP);
        }
      }
    }
  }

  BLOCK_COMMENT("Store method oop");
  // Store method oop into thread->callee_target.
  // We might end up in handle_wrong_method if the callee is
  // deoptimized as we race thru here. If that happens we don't want
  // to take a safepoint because the caller frame will look
  // interpreted and arguments are now "compiled" so it is much better
  // to make this transition invisible to the stack walking
  // code. Unfortunately if we try and find the callee by normal means
  // a safepoint is possible. So we stash the desired callee in the
  // thread and the vm will find it there should this case occur.
  __ std(R19_method, thread_(callee_target));

  // Jump to the compiled code just as if compiled code was doing it.
  __ bctr();
}

AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry;
  address c2i_unverified_entry;
  address c2i_entry;


  // entry: i2c

  __ align(CodeEntryAlignment);
  i2c_entry = __ pc();
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // entry: c2i unverified

  __ align(CodeEntryAlignment);
  BLOCK_COMMENT("c2i unverified entry");
  c2i_unverified_entry = __ pc();

  // inline_cache contains a CompiledICHolder.
  const Register ic             = R19_method;
  const Register ic_klass       = R11_scratch1;
  const Register receiver_klass = R12_scratch2;
  const Register code           = R21_tmp1;
  const Register ientry         = R23_tmp3;

  assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
  assert(R11_scratch1 == R11, "need prologue scratch register");

  Label call_interpreter;

  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
         "klass offset should reach into any page");
  // Check for NULL argument if we don't have implicit null checks.
  if (!ImplicitNullChecks NOT_LINUX(|| true) /*!os::zero_page_read_protected()*/) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R3_ARG1);
    } else {
      Label valid;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne_predict_taken(CCR0, valid);
      // We have a null argument, branch to ic_miss_stub.
      __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                       relocInfo::runtime_call_type);
      __ BIND(valid);
    }
  }
  // Assume argument is not NULL, load klass from receiver.
  __ load_klass(receiver_klass, R3_ARG1);

  __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);

  if (TrapBasedICMissChecks) {
    __ trap_ic_miss_check(receiver_klass, ic_klass);
  } else {
    Label valid;
    __ cmpd(CCR0, receiver_klass, ic_klass);
    __ beq_predict_taken(CCR0, valid);
    // We have an unexpected klass, branch to ic_miss_stub.
    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
    __ BIND(valid);
  }

  // Argument is valid and klass is as expected, continue.

  // Extract method from inline cache, verified entry point needs it.
  __ ld(R19_method, CompiledICHolder::holder_method_offset(), ic);
  assert(R19_method == ic, "the inline cache register is dead here");

  __ ld(code, method_(code));
  __ cmpdi(CCR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
  __ beq_predict_taken(CCR0, call_interpreter);

  // Branch to ic_miss_stub.
  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                   relocInfo::runtime_call_type);

  // entry: c2i

  c2i_entry = gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);

  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

#ifdef COMPILER2
// An oop arg. Must pass a handle not the oop itself.
static void object_move(MacroAssembler* masm,
                        int frame_size_in_slots,
                        OopMap* oop_map, int oop_handle_offset,
                        bool is_receiver, int* receiver_offset,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
  assert(!is_receiver || (is_receiver && (*receiver_offset == -1)),
         "receiver has already been moved");

  // We must pass a handle. First figure out the location we use as a handle.

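  // Illustration (editor's note): a "handle" here is simply the address of
  // a stack slot that contains the oop, so the native code receives a
  // jobject it can dereference; a NULL oop is passed as a NULL handle (see
  // the cmpdi/li sequences below).
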
  if (src.first()->is_stack()) {
    // stack to stack or reg

    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    Label skip;
    const int oop_slot_in_callers_frame = reg2slot(src.first());

    guarantee(!is_receiver, "expecting receiver in register");
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));

    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
    __ ld(  r_temp_2, reg2offset(src.first()), r_caller_sp);
    __ cmpdi(CCR0, r_temp_2, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // stack to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      // Nothing to do, r_handle is already the dst register.
    }
  } else {
    // reg to stack or reg
    const Register r_oop    = src.first()->as_Register();
    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
    const int oop_slot      = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
                              + oop_handle_offset; // in slots
    const int oop_offset    = oop_slot * VMRegImpl::stack_slot_size;
    Label skip;

    if (is_receiver) {
      *receiver_offset = oop_offset;
    }
    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));

    __ std( r_oop,    oop_offset, R1_SP);
    __ addi(r_handle, R1_SP, oop_offset);

    __ cmpdi(CCR0, r_oop, 0);
    __ bne(CCR0, skip);
    // Use a NULL handle if oop is NULL.
    __ li(r_handle, 0);
    __ bind(skip);

    if (dst.first()->is_stack()) {
      // reg to stack
      __ std(r_handle, reg2offset(dst.first()), R1_SP);
    } else {
      // reg to reg
      // Nothing to do, r_handle is already the dst register.
    }
  }
}

static void int_move(MacroAssembler* masm,
                     VMRegPair src, VMRegPair dst,
                     Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long-int");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ extsw(r_temp, src.first()->as_Register());
    __ std(r_temp, reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    __ extsw(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void long_move(MacroAssembler* masm,
                      VMRegPair src, VMRegPair dst,
                      Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_Register() != src.first()->as_Register())
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
  }
}

static void float_move(MacroAssembler* masm,
                       VMRegPair src, VMRegPair dst,
                       Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
  assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
      __ stw(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

static void double_move(MacroAssembler* masm,
                        VMRegPair src, VMRegPair dst,
                        Register r_caller_sp, Register r_temp) {
  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
  } else {
    // reg to reg
    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
  }
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      __ stw (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_ARRAY:
    case T_OBJECT:
    case T_LONG:
      __ std (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_FLOAT:
      __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_DOUBLE:
      __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}
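
// Usage sketch (editor's illustration): save_native_result and
// restore_native_result (below) are used as a pair to preserve the native
// return value across a VM call, e.g.
//
//   save_native_result(masm, ret_type, workspace_slot_offset);
//   // ... call into the VM, possibly at a safepoint ...
//   restore_native_result(masm, ret_type, workspace_slot_offset);
//
// `workspace_slot_offset' is a caller-chosen slot index, assumed here only
// for illustration.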
|
1385 |
|
1386 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { |
|
1387 switch (ret_type) { |
|
1388 case T_BOOLEAN: |
|
1389 case T_CHAR: |
|
1390 case T_BYTE: |
|
1391 case T_SHORT: |
|
1392 case T_INT: |
|
1393 __ lwz(R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); |
|
1394 break; |
|
1395 case T_ARRAY: |
|
1396 case T_OBJECT: |
|
1397 case T_LONG: |
|
1398 __ ld (R3_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); |
|
1399 break; |
|
1400 case T_FLOAT: |
|
1401 __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); |
|
1402 break; |
|
1403 case T_DOUBLE: |
|
1404 __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP); |
|
1405 break; |
|
1406 case T_VOID: |
|
1407 break; |
|
1408 default: |
|
1409 ShouldNotReachHere(); |
|
1410 break; |
|
1411 } |
|
1412 } |
|
1413 |
|
1414 static void save_or_restore_arguments(MacroAssembler* masm, |
|
1415 const int stack_slots, |
|
1416 const int total_in_args, |
|
1417 const int arg_save_area, |
|
1418 OopMap* map, |
|
1419 VMRegPair* in_regs, |
|
1420 BasicType* in_sig_bt) { |
|
1421 // If map is non-NULL then the code should store the values, |
|
1422 // otherwise it should load them. |
|
1423 int slot = arg_save_area; |
|
1424 // Save down double word first. |
|
1425 for (int i = 0; i < total_in_args; i++) { |
|
1426 if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) { |
|
1427 int offset = slot * VMRegImpl::stack_slot_size; |
|
1428 slot += VMRegImpl::slots_per_word; |
|
1429 assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)"); |
|
1430 if (map != NULL) { |
|
1431 __ stfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP); |
|
1432 } else { |
|
1433 __ lfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP); |
|
1434 } |
|
1435 } else if (in_regs[i].first()->is_Register() && |
|
1436 (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) { |
|
1437 int offset = slot * VMRegImpl::stack_slot_size; |
|
1438 if (map != NULL) { |
|
1439 __ std(in_regs[i].first()->as_Register(), offset, R1_SP); |
|
1440 if (in_sig_bt[i] == T_ARRAY) { |
|
1441 map->set_oop(VMRegImpl::stack2reg(slot)); |
|
1442 } |
|
1443 } else { |
|
1444 __ ld(in_regs[i].first()->as_Register(), offset, R1_SP); |
|
1445 } |
|
1446 slot += VMRegImpl::slots_per_word; |
|
1447 assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)"); |
|
1448 } |
|
1449 } |
|
1450 // Save or restore single word registers. |
|
1451 for (int i = 0; i < total_in_args; i++) { |
|
1452 // PPC64: pass ints as longs: must only deal with floats here. |
|
1453 if (in_regs[i].first()->is_FloatRegister()) { |
|
1454 if (in_sig_bt[i] == T_FLOAT) { |
|
1455 int offset = slot * VMRegImpl::stack_slot_size; |
|
1456 slot++; |
|
1457 assert(slot <= stack_slots, "overflow (after FLOAT stack slot)"); |
|
1458 if (map != NULL) { |
|
1459 __ stfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP); |
|
1460 } else { |
|
1461 __ lfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP); |
|
1462 } |
|
1463 } |
|
1464 } else if (in_regs[i].first()->is_stack()) { |
|
1465 if (in_sig_bt[i] == T_ARRAY && map != NULL) { |
|
1466 int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); |
|
1467 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots)); |
|
1468 } |
|
1469 } |
|
1470 } |
|
1471 } |
|
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               const int stack_slots,
                                               const int total_in_args,
                                               const int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt,
                                               Register tmp_reg ) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GC_locker::needs_gc_address());
  __ cmplwi(CCR0, tmp_reg, 0);
  __ beq(CCR0, cont);

  // Save down any values that are live in registers and call into the
  // runtime to halt for a GC.
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  __ mr(R3_ARG1, R16_thread);
  __ set_last_Java_frame(R1_SP, noreg);

  __ block_comment("block_for_jni_critical");
  address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
  address start           = __ pc() - __ offset(),
          calls_return_pc = __ last_calls_return_pc();
  oop_maps->add_gc_map(calls_return_pc - start, map);

  __ reset_last_Java_frame();

  // Reload all the register arguments.
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ BIND(cont);

#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving.
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers.
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ neg(reg, reg);
      } else if (in_regs[i].first()->is_FloatRegister()) {
        __ fneg(in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}
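
// The OopMap built above describes exactly the arg_save_area slots filled by
// save_or_restore_arguments, so a GC that runs while the thread blocks in
// block_for_jni_critical can find and update the spilled oop (T_ARRAY)
// arguments before they are reloaded.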
|
static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(r_temp, reg2offset(src.first()), r_caller_sp);
      __ std(r_temp, reg2offset(dst.first()), R1_SP);
    } else {
      // stack to reg
      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
  } else {
    if (dst.first() != src.first()) {
      __ mr(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
|
// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type,
                                  VMRegPair body_arg, VMRegPair length_arg, Register r_caller_sp,
                                  Register tmp_reg, Register tmp2_reg) {
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair.
  Label set_out_args;
  VMRegPair tmp, tmp2;
  tmp.set_ptr(tmp_reg->as_VMReg());
  tmp2.set_ptr(tmp2_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack.
    move_ptr(masm, reg, tmp, r_caller_sp, /*unused*/ R0);
    reg = tmp;
  }
  __ li(tmp2_reg, 0); // Pass zeros if Array=null.
  if (tmp_reg != reg.first()->as_Register()) __ li(tmp_reg, 0);
  __ cmpdi(CCR0, reg.first()->as_Register(), 0);
  __ beq(CCR0, set_out_args);
  __ lwa(tmp2_reg, arrayOopDesc::length_offset_in_bytes(), reg.first()->as_Register());
  __ addi(tmp_reg, reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type));
  __ bind(set_out_args);
  move_ptr(masm, tmp, body_arg, r_caller_sp, /*unused*/ R0);
  move_ptr(masm, tmp2, length_arg, r_caller_sp, /*unused*/ R0); // Same as move32_64 on PPC64.
}
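
// Sketch of the effect (hypothetical example, not generated here): for a
// critical native declared in Java as f(int[] a), a NULL array arrives at the
// callee as (body = 0, length = 0), while a non-null array arrives as
// (body = a + arrayOopDesc::base_offset_in_bytes(T_INT), length = a.length).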
|
static void verify_oop_args(MacroAssembler* masm,
                            methodHandle method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = R19_method;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, reg2offset(r), R1_SP);
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}
|
static void gen_special_dispatch(MacroAssembler* masm,
                                 methodHandle method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = R19_method;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal(err_msg_res("unexpected intrinsic id %d", iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note: This assumes that compiled calling conventions always
      // pass the receiver oop in a register. If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = R11_scratch1;  // TODO (hs24): is R11_scratch1 really free at this point?
      __ ld(receiver_reg, reg2offset(r), R1_SP);
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
|
#endif // COMPILER2

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions. The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed. Some other
// parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions, since it's impossible for
// them to be thrown.
//
// They are roughly structured like this:
//   if (GC_locker::needs_gc())
//     SharedRuntime::block_for_jni_critical();
//   transition to thread_in_native
//   unpack array arguments and call native entry point
//   check for safepoint in progress
//   check if any thread suspend flags are set
//   call into JVM and possibly unlock the JNI critical
//   if a GC was suppressed while in the critical native.
//   transition back to thread_in_Java
//   return to caller
//
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                methodHandle method,
                                                int compile_id,
                                                BasicType *in_sig_bt,
                                                VMRegPair *in_regs,
                                                BasicType ret_type) {
#ifdef COMPILER2
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }

  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // First, create signature for outgoing C call
  // --------------------------------------------------------------------------

  int total_in_args = method->size_of_parameters();
  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method).
  //
  // Additionally, on ppc64 we must convert integers to longs in the C
  // signature. We do this in advance in order to have no trouble with
  // indexes into the bt-arrays.
  // So convert the signature and registers now, and adjust the total number
  // of in-arguments accordingly.
  int i2l_argcnt = convert_ints_to_longints_argcnt(total_in_args, in_sig_bt); // PPC64: pass ints as longs.

  // Calculate the total number of C arguments and create arrays for the
  // signature and the outgoing registers.
  // On ppc64, we have two arrays for the outgoing registers, because
  // some floating-point arguments must be passed in registers _and_
  // in stack locations.
  bool method_is_static = method->is_static();
  int  total_c_args     = i2l_argcnt;

  if (!is_critical_native) {
    int n_hidden_args = method_is_static ? 2 : 1;
    total_c_args += n_hidden_args;
  } else {
    // No JNIEnv*, no this*, but unpacked arrays (base+length).
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args += 2; // PPC64: T_LONG, T_INT, T_ADDRESS (see convert_ints_to_longints and c_calling_convention)
      }
    }
  }

  BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair *out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  VMRegPair *out_regs2  = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  // Create the signature for the C call:
  //   1) add the JNIEnv*
  //   2) add the class if the method is static
  //   3) copy the rest of the incoming signature (shifted by the number of
  //      hidden arguments).

  int argc = 0;
  if (!is_critical_native) {
    convert_ints_to_longints(i2l_argcnt, total_in_args, in_sig_bt, in_regs); // PPC64: pass ints as longs.

    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args; i++) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, i2l_argcnt);
    SignatureStream ss(method->signature());
    int o = 0;
    for (int i = 0; i < total_in_args; i++, o++) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair.
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[o] = T_BYTE; break;
            case 'C': in_elem_bt[o] = T_CHAR; break;
            case 'D': in_elem_bt[o] = T_DOUBLE; break;
            case 'F': in_elem_bt[o] = T_FLOAT; break;
            case 'I': in_elem_bt[o] = T_INT; break;
            case 'J': in_elem_bt[o] = T_LONG; break;
            case 'S': in_elem_bt[o] = T_SHORT; break;
            case 'Z': in_elem_bt[o] = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        in_elem_bt[o] = T_VOID;
        switch (in_sig_bt[i]) { // PPC64: pass ints as longs.
          case T_BOOLEAN:
          case T_CHAR:
          case T_BYTE:
          case T_SHORT:
          case T_INT: in_elem_bt[++o] = T_VOID; break;
          default: break;
        }
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
    assert(i2l_argcnt == o, "must match");

    convert_ints_to_longints(i2l_argcnt, total_in_args, in_sig_bt, in_regs); // PPC64: pass ints as longs.

    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair.
        out_sig_bt[argc++] = T_LONG; // PPC64: pass ints as longs.
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
      }
    }
  }
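
  // Worked example (hypothetical, for illustration only): a critical native
  // for "static native int sum(int[] a)" has in_sig_bt = { T_ARRAY } and
  // produces out_sig_bt = { T_LONG, T_INT, T_ADDRESS }: the T_LONG/T_INT pair
  // is the array length widened to a long (PPC64 passes ints as longs), and
  // T_ADDRESS is the pointer to the array body.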
|

  // Compute the wrapper's frame size.
  // --------------------------------------------------------------------------

  // Now figure out where the args must be stored and how much stack space
  // they require.
  //
  // Compute framesize for the wrapper. We need to handlize all oops in
  // incoming registers.
  //
  // Calculate the total number of stack slots we will need:
  //   1) abi requirements
  //   2) outgoing arguments
  //   3) space for inbound oop handle area
  //   4) space for handlizing a klass if static method
  //   5) space for a lock if synchronized method
  //   6) workspace for saving return values, int <-> float reg moves, etc.
  //   7) alignment
  //
  // Layout of the native wrapper frame:
  // (stack grows upwards, memory grows downwards)
  //
  //   NW     [ABI_112]                  <-- 1) R1_SP
  //          [outgoing arguments]       <-- 2) R1_SP + out_arg_slot_offset
  //          [oopHandle area]           <-- 3) R1_SP + oop_handle_offset (save area for critical natives)
  //          klass                      <-- 4) R1_SP + klass_offset
  //          lock                       <-- 5) R1_SP + lock_offset
  //          [workspace]                <-- 6) R1_SP + workspace_offset
  //          [alignment] (optional)     <-- 7)
  //   caller [JIT_TOP_ABI_48]           <-- r_callers_sp
  //
  // - *_slot_offset  Indicates offset from SP in number of stack slots.
  // - *_offset       Indicates offset from SP in bytes.

  int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) // 1+2)
                  + SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention.

  // Now the space for the inbound oop handle area.
  int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word;
  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  /*single_slots++;*/ break; // PPC64: pass ints as longs.
          case T_ARRAY:
          case T_LONG: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        switch (in_sig_bt[i]) {
          case T_FLOAT:  single_slots++; break;
          case T_DOUBLE: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      }
    }
    total_save_slots = double_slots * 2 + round_to(single_slots, 2); // round to even
  }
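
  // Arithmetic sketch (hypothetical signature, for illustration): a critical
  // native taking (long, double, float) in registers yields double_slots == 2
  // and single_slots == 1, so total_save_slots = 2 * 2 + round_to(1, 2) = 6.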
|
  int oop_handle_slot_offset = stack_slots;
  stack_slots += total_save_slots;                                        // 3)

  int klass_slot_offset = 0;
  int klass_offset      = -1;
  if (method_is_static && !is_critical_native) {                          // 4)
    klass_slot_offset = stack_slots;
    klass_offset      = klass_slot_offset * VMRegImpl::stack_slot_size;
    stack_slots      += VMRegImpl::slots_per_word;
  }

  int lock_slot_offset = 0;
  int lock_offset      = -1;
  if (method->is_synchronized()) {                                        // 5)
    lock_slot_offset = stack_slots;
    lock_offset      = lock_slot_offset * VMRegImpl::stack_slot_size;
    stack_slots     += VMRegImpl::slots_per_word;
  }

  int workspace_slot_offset = stack_slots;                                // 6)
  stack_slots += 2;

  // Now compute actual number of stack words we need.
  // Rounding to make stack properly aligned.
  stack_slots = round_to(stack_slots,                                     // 7)
                         frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
  int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;


  // Now we can start generating code.
  // --------------------------------------------------------------------------

  intptr_t start_pc = (intptr_t)__ pc();
  intptr_t vep_start_pc;
  intptr_t frame_done_pc;
  intptr_t oopmap_pc;

  Label    ic_miss;
  Label    handle_pending_exception;

  Register r_callers_sp = R21;
  Register r_temp_1     = R22;
  Register r_temp_2     = R23;
  Register r_temp_3     = R24;
  Register r_temp_4     = R25;
  Register r_temp_5     = R26;
  Register r_temp_6     = R27;
  Register r_return_pc  = R28;

  Register r_carg1_jnienv        = noreg;
  Register r_carg2_classorobject = noreg;
  if (!is_critical_native) {
    r_carg1_jnienv        = out_regs[0].first()->as_Register();
    r_carg2_classorobject = out_regs[1].first()->as_Register();
  }


  // Generate the Unverified Entry Point (UEP).
  // --------------------------------------------------------------------------
  assert(start_pc == (intptr_t)__ pc(), "uep must be at start");

  // Check ic: object class == cached class?
  if (!method_is_static) {
    Register ic             = as_Register(Matcher::inline_cache_reg_encode());
    Register receiver_klass = r_temp_1;

    __ cmpdi(CCR0, R3_ARG1, 0);
    __ beq(CCR0, ic_miss);
    __ verify_oop(R3_ARG1);
    __ load_klass(receiver_klass, R3_ARG1);

    __ cmpd(CCR0, receiver_klass, ic);
    __ bne(CCR0, ic_miss);
  }


  // Generate the Verified Entry Point (VEP).
  // --------------------------------------------------------------------------
  vep_start_pc = (intptr_t)__ pc();

  __ save_LR_CR(r_temp_1);
  __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
  __ mr(r_callers_sp, R1_SP);                            // Remember frame pointer.
  __ push_frame(frame_size_in_bytes, r_temp_1);          // Push the c2n adapter's frame.
  frame_done_pc = (intptr_t)__ pc();

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments.
  // The only thing we need an oopMap for is if the call is static.
  //
  // An OopMap for lock (and class if static), and one for the VM call itself.
  OopMapSet *oop_maps = new OopMapSet();
  OopMap    *oop_map  = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset, oop_maps, in_regs, in_sig_bt, r_temp_1);
  }

  // Move arguments from register/stack to register/stack.
  // --------------------------------------------------------------------------
  //
  // We immediately shuffle the arguments so that for any vm call we have
  // to make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for them.
  //
  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
  // (derived from JavaThread* which is in R16_thread) and, if static,
  // the class mirror instead of a receiver. This pretty much guarantees that
  // register layout will not match. We ignore these extra arguments during
  // the shuffle. The shuffle is described by the two calling convention
  // vectors we have in our possession. We simply walk the java vector to
  // get the source locations and the c vector to get the destinations.

  // Record sp-based slot for receiver on stack for non-static methods.
  int receiver_offset = -1;

  // We move the arguments backward because the floating-point register
  // destinations will always be registers with greater or equal register
  // numbers, or stack locations.
  //   in  is the index of the incoming Java arguments
  //   out is the index of the outgoing C arguments

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for (int r = 0; r < RegisterImpl::number_of_registers; r++) {
    reg_destroyed[r] = false;
  }
  for (int f = 0; f < FloatRegisterImpl::number_of_registers; f++) {
    freg_destroyed[f] = false;
  }
#endif // ASSERT

  for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0; in--, out--) {

#ifdef ASSERT
    if (in_regs[in].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!");
    } else if (in_regs[in].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!");
    }
    if (out_regs[out].first()->is_Register()) {
      reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true;
    } else if (out_regs[out].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true;
    }
    if (out_regs2[out].first()->is_Register()) {
      reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true;
    } else if (out_regs2[out].first()->is_FloatRegister()) {
      freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true;
    }
#endif // ASSERT

    switch (in_sig_bt[in]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        guarantee(in > 0 && in_sig_bt[in-1] == T_LONG,
                  "expecting type (T_LONG,bt) for bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
        break;
      case T_LONG:
        if (in_sig_bt[in+1] == T_VOID) {
          long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        } else {
          guarantee(in_sig_bt[in+1] == T_BOOLEAN || in_sig_bt[in+1] == T_CHAR  ||
                    in_sig_bt[in+1] == T_BYTE    || in_sig_bt[in+1] == T_SHORT ||
                    in_sig_bt[in+1] == T_INT,
                    "expecting type (T_LONG,bt) for bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
          int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        }
        break;
      case T_ARRAY:
        if (is_critical_native) {
          int body_arg = out;
          out -= 2; // Point to length arg. PPC64: pass ints as longs.
          unpack_array_argument(masm, in_regs[in], in_elem_bt[in], out_regs[body_arg], out_regs[out],
                                r_callers_sp, r_temp_1, r_temp_2);
          break;
        }
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, stack_slots,
                    oop_map, oop_handle_slot_offset,
                    ((in == 0) && (!method_is_static)), &receiver_offset,
                    in_regs[in], out_regs[out],
                    r_callers_sp, r_temp_1, r_temp_2);
        break;
      case T_VOID:
        break;
      case T_FLOAT:
        float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        if (out_regs2[out].first()->is_valid()) {
          float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
        }
        break;
      case T_DOUBLE:
        double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
        if (out_regs2[out].first()->is_valid()) {
          double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
        }
        break;
      case T_ADDRESS:
        fatal("found type (T_ADDRESS) in java args");
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }
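
  // At this point every Java argument has been shuffled into its C location;
  // as noted above, floats and doubles may additionally have been copied into
  // the out_regs2 stack locations demanded by the C calling convention.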
|
  // Pre-load a static method's oop into ARG2.
  // Used both by locking code and the normal JNI call code.
  if (method_is_static && !is_critical_native) {
    __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()),
                        r_carg2_classorobject);

    // Now handlize the static class mirror in carg2. It's known not-null.
    __ std(r_carg2_classorobject, klass_offset, R1_SP);
    oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
    __ addi(r_carg2_classorobject, R1_SP, klass_offset);
  }

  // Get JNIEnv* which is first argument to native.
  if (!is_critical_native) {
    __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
  }

  // NOTE:
  //
  // We have all of the arguments setup at this point.
  // We MUST NOT touch any outgoing regs from this point on.
  // So if we must call out we must push a new frame.

  // Get current pc for oopmap, and load it patchable relative to global toc.
  oopmap_pc = (intptr_t) __ pc();
  __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true);

  // We use the same pc/oopMap repeatedly when we call out.
  oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map);

  // r_return_pc now has the pc loaded that we will use when we finally call
  // to native.

  // Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
  assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");


# if 0
  // DTrace method entry
# endif

  // Lock a synchronized method.
  // --------------------------------------------------------------------------

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");
    ConditionRegister r_flag = CCR1;
    Register r_oop = r_temp_4;
    const Register r_box = r_temp_5;
    Label done, locked;

    // Load the oop for the object or class. r_carg2_classorobject contains
    // either the handlized oop from the incoming arguments or the handlized
    // class mirror (if the method is static).
    __ ld(r_oop, 0, r_carg2_classorobject);

    // Get the lock box slot's address.
    __ addi(r_box, R1_SP, lock_offset);

#   ifdef ASSERT
    if (UseBiasedLocking) {
      // Making the box point to itself will make it clear it went unused
      // but also be obviously invalid.
      __ std(r_box, 0, r_box);
    }
#   endif // ASSERT

    // Try fastpath for locking.
    // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
    __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
    __ beq(r_flag, locked);

    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter. Inline a special case of call_VM that
    // disallows any pending_exception.

    // Save argument registers and leave room for C-compatible ABI_112.
    int frame_size = frame::abi_112_size +
                     round_to(total_c_args * wordSize, frame::alignment_in_bytes);
    __ mr(R11_scratch1, R1_SP);
    RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);

    // Do the call.
    __ set_last_Java_frame(R11_scratch1, r_return_pc);
    assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
    __ reset_last_Java_frame();

    RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2);

    __ asm_assert_mem8_is_zero(thread_(pending_exception),
       "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C", 0);

    __ bind(locked);
  }


  // Publish thread state
  // --------------------------------------------------------------------------

  // Use that pc we placed in r_return_pc a while back as the current frame anchor.
  __ set_last_Java_frame(R1_SP, r_return_pc);

  // Transition from _thread_in_Java to _thread_in_native.
  __ li(R0, _thread_in_native);
  __ release();
  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }
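
  // The release() above orders all prior stores before the thread_state
  // update; with -XX:+UseMembar the fence additionally orders the store
  // against subsequent accesses. Without UseMembar, the VM relies on the
  // memory serialization page protocol instead (see the transition back
  // to Java below).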
|

  // The JNI call
  // --------------------------------------------------------------------------

  FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
  __ call_c(fd_native_method, relocInfo::runtime_call_type);


  // Now, we are back from the native code.


  // Unpack the native result.
  // --------------------------------------------------------------------------

  // For int-types, we do any needed sign-extension.
  // Care must be taken that the return values (R3_RET and F1_RET)
  // will survive any VM calls for blocking or unlocking.
  // An OOP result (handle) is done specially in the slow-path code.

  switch (ret_type) {
    case T_VOID:    break;        // Nothing to do!
    case T_FLOAT:   break;        // Got it where we want it (unless slow-path).
    case T_DOUBLE:  break;        // Got it where we want it (unless slow-path).
    case T_LONG:    break;        // Got it where we want it (unless slow-path).
    case T_OBJECT:  break;        // Really a handle.
                                  // Cannot de-handlize until after reclaiming jvm_lock.
    case T_ARRAY:   break;

    case T_BOOLEAN: {             // 0 -> false(0); !0 -> true(1)
      Label skip_modify;
      __ cmpwi(CCR0, R3_RET, 0);
      __ beq(CCR0, skip_modify);
      __ li(R3_RET, 1);
      __ bind(skip_modify);
      break;
    }
    case T_BYTE: {                // sign extension
      __ extsb(R3_RET, R3_RET);
      break;
    }
    case T_CHAR: {                // unsigned result
      __ andi(R3_RET, R3_RET, 0xffff);
      break;
    }
    case T_SHORT: {               // sign extension
      __ extsh(R3_RET, R3_RET);
      break;
    }
    case T_INT:                   // nothing to do
      break;
    default:
      ShouldNotReachHere();
      break;
  }
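
  // For example, a native method declared to return jboolean that hands back
  // the C value 2 leaves here with R3_RET canonicalized to 1, and a T_CHAR
  // result is masked to its unsigned 16-bit range.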
|

  // Publish thread state
  // --------------------------------------------------------------------------

  // Switch thread to "native transition" state before reading the
  // synchronization state. This additional state is necessary because reading
  // and testing the synchronization state is not atomic w.r.t. GC, as this
  // scenario demonstrates:
  //   - Java thread A, in _thread_in_native state, loads _not_synchronized
  //     and is preempted.
  //   - VM thread changes sync state to synchronizing and suspends threads
  //     for GC.
  //   - Thread A is resumed to finish this native method, but doesn't block
  //     here since it didn't see any synchronization in progress, and escapes.

  // Transition from _thread_in_native to _thread_in_native_trans.
  __ li(R0, _thread_in_native_trans);
  __ release();
  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));


  // Must we block?
  // --------------------------------------------------------------------------

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  Label after_transition;
  {
    Label no_block, sync;

    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below.
        __ fence();
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
      }
    }

    Register sync_state_addr = r_temp_4;
    Register sync_state      = r_temp_5;
    Register suspend_flags   = r_temp_6;

    __ load_const(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/ sync_state);

    // TODO: PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
    __ lwz(sync_state, 0, sync_state_addr);

    // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
    __ lwz(suspend_flags, thread_(suspend_flags));

    __ acquire();

    Label do_safepoint;
    // No synchronization in progress nor yet synchronized.
    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
    // Not suspended.
    __ cmpwi(CCR1, suspend_flags, 0);

    __ bne(CCR0, sync);
    __ beq(CCR1, no_block);

    // Block. Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
    // lets us share the oopMap we used when we went native rather than create
    // a distinct one for this pc.
    __ bind(sync);

    address entry_point = is_critical_native
      ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)
      : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
    save_native_result(masm, ret_type, workspace_slot_offset);
    __ call_VM_leaf(entry_point, R16_thread);
    restore_native_result(masm, ret_type, workspace_slot_offset);

    if (is_critical_native) {
      __ b(after_transition); // No thread state transition here.
    }
    __ bind(no_block);
  }
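
  // Note on the tests above: the safepoint state is compared in CCR0 and the
  // suspend flags in CCR1, so both compares are issued before either
  // conditional branch consumes them.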
|
  // Publish thread state.
  // --------------------------------------------------------------------------

  // Thread state is thread_in_native_trans. Any safepoint blocking has
  // already happened so we can now change state to _thread_in_Java.

  // Transition from _thread_in_native_trans to _thread_in_Java.
  __ li(R0, _thread_in_Java);
  __ release();
  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
  __ stw(R0, thread_(thread_state));
  if (UseMembar) {
    __ fence();
  }
  __ bind(after_transition);

  // Reguard any pages if necessary.
  // --------------------------------------------------------------------------

  Label no_reguard;
  __ lwz(r_temp_1, thread_(stack_guard_state));
  __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_disabled);
  __ bne(CCR0, no_reguard);

  save_native_result(masm, ret_type, workspace_slot_offset);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
  restore_native_result(masm, ret_type, workspace_slot_offset);

  __ bind(no_reguard);


  // Unlock
  // --------------------------------------------------------------------------

  if (method->is_synchronized()) {

    ConditionRegister r_flag = CCR1;
    const Register r_oop = r_temp_4;
    const Register r_box = r_temp_5;
    const Register r_exception = r_temp_6;
    Label done;

    // Get oop and address of lock object box.
    if (method_is_static) {
      assert(klass_offset != -1, "");
      __ ld(r_oop, klass_offset, R1_SP);
    } else {
      assert(receiver_offset != -1, "");
      __ ld(r_oop, receiver_offset, R1_SP);
    }
    __ addi(r_box, R1_SP, lock_offset);

    // Try fastpath for unlocking.
    __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
    __ beq(r_flag, done);

    // Save and restore any potential method result value around the unlocking operation.
    save_native_result(masm, ret_type, workspace_slot_offset);

    // Must save pending exception around the slow-path VM call. Since it's a
    // leaf call, the pending exception (if any) can be kept in a register.
    __ ld(r_exception, thread_(pending_exception));
    assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
    __ li(R0, 0);
    __ std(R0, thread_(pending_exception));

    // Slow case of monitor exit.
    // Inline a special case of call_VM that disallows any pending_exception.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box);

    __ asm_assert_mem8_is_zero(thread_(pending_exception),
       "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0);

    restore_native_result(masm, ret_type, workspace_slot_offset);

    // check_forward_pending_exception jumps to forward_exception if any pending
    // exception is set. The forward_exception routine expects to see the
    // exception in pending_exception and not in a register. Kind of clumsy,
    // since all callers who branch to forward_exception must have tested
    // pending_exception first and hence have it in a register already.
    __ std(r_exception, thread_(pending_exception));

    __ bind(done);
  }

# if 0
  // DTrace method exit
# endif

  // Clear "last Java frame" SP and PC.
  // --------------------------------------------------------------------------

  __ reset_last_Java_frame();

  // Unpack oop result.
  // --------------------------------------------------------------------------

  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    Label skip_unboxing;
    __ cmpdi(CCR0, R3_RET, 0);
    __ beq(CCR0, skip_unboxing);
    __ ld(R3_RET, 0, R3_RET);
    __ bind(skip_unboxing);
    __ verify_oop(R3_RET);
  }


  // Reset handle block.
  // --------------------------------------------------------------------------
  if (!is_critical_native) {
    __ ld(r_temp_1, thread_(active_handles));
    // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
    __ li(r_temp_2, 0);
    __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1);


    // Check for pending exceptions.
    // --------------------------------------------------------------------------
    __ ld(r_temp_2, thread_(pending_exception));
    __ cmpdi(CCR0, r_temp_2, 0);
    __ bne(CCR0, handle_pending_exception);
  }

  // Return
  // --------------------------------------------------------------------------

  __ pop_frame();
  __ restore_LR_CR(R11);
  __ blr();


  // Handler for pending exceptions (out-of-line).
  // --------------------------------------------------------------------------

  // Since this is a native call, we know the proper exception handler
  // is the empty function. We just pop this frame and then jump to
  // forward_exception_entry.
  if (!is_critical_native) {
    __ align(InteriorEntryAlignment);
    __ bind(handle_pending_exception);

    __ pop_frame();
    __ restore_LR_CR(R11);
    __ b64_patchable((address)StubRoutines::forward_exception_entry(),
                     relocInfo::runtime_call_type);
  }

  // Handler for a cache miss (out-of-line).
  // --------------------------------------------------------------------------

  if (!method_is_static) {
    __ align(InteriorEntryAlignment);
    __ bind(ic_miss);

    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
  }

  // Done.
  // --------------------------------------------------------------------------

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_start_pc - start_pc,
                                            frame_done_pc - start_pc,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_offset),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;
#else
  ShouldNotReachHere();
  return NULL;
#endif // COMPILER2
}

// This function returns the adjustment (in number of words) to a c2i adapter
// activation, for use during deoptimization.
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  return round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
}
|
uint SharedRuntime::out_preserve_stack_slots() {
#ifdef COMPILER2
  return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
#else
  return 0;
#endif
}

#ifdef COMPILER2
// Frame generation for deopt and uncommon trap blobs.
static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
                                /* Read */
                                Register unroll_block_reg,
                                /* Update */
                                Register frame_sizes_reg,
                                Register number_of_frames_reg,
                                Register pcs_reg,
                                /* Invalidate */
                                Register frame_size_reg,
                                Register pc_reg) {

  __ ld(pc_reg, 0, pcs_reg);
  __ ld(frame_size_reg, 0, frame_sizes_reg);
  __ std(pc_reg, _abi(lr), R1_SP);
  __ push_frame(frame_size_reg, R0/*tmp*/);
  __ std(R1_SP, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
  __ addi(number_of_frames_reg, number_of_frames_reg, -1);
  __ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
  __ addi(pcs_reg, pcs_reg, wordSize);
}
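
// Reading guide for the helper above: each invocation consumes one entry from
// the pcs and frame_sizes arrays (bumping both pointers by wordSize), stores
// the pc into the LR slot of the frame that is about to become the parent,
// pushes the new skeletal frame, and decrements number_of_frames_reg; the
// caller (push_skeleton_frames below) loops until that count reaches zero.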
|
// Loop through the UnrollBlock info and create new frames.
static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
                                 /* read */
                                 Register unroll_block_reg,
                                 /* invalidate */
                                 Register frame_sizes_reg,
                                 Register number_of_frames_reg,
                                 Register pcs_reg,
                                 Register frame_size_reg,
                                 Register pc_reg) {
  Label loop;

  // _number_of_frames is of type int (deoptimization.hpp).
  __ lwa(number_of_frames_reg,
         Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(),
         unroll_block_reg);
  __ ld(pcs_reg,
        Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(),
        unroll_block_reg);
  __ ld(frame_sizes_reg,
        Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(),
        unroll_block_reg);

  // stack: (caller_of_deoptee, ...).

  // At this point we either have an interpreter frame or a compiled
  // frame on top of stack. If it is a compiled frame, we push a new c2i
  // adapter here.

  // Memorize top-frame stack-pointer.
  __ mr(frame_size_reg/*old_sp*/, R1_SP);

  // Resize interpreter top frame OR C2I adapter.

  // At this moment, the top frame (which is the caller of the deoptee) is
  // an interpreter frame or a newly pushed C2I adapter or an entry frame.
  // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the
  // outgoing arguments.
  //
  // In order to push the interpreter frame for the deoptee, we need to
  // resize the top frame such that we are able to place the deoptee's
  // locals in the frame.
  // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI
  // into a valid PARENT_IJAVA_FRAME_ABI.

  __ lwa(R11_scratch1,
         Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(),
         unroll_block_reg);
  __ neg(R11_scratch1, R11_scratch1);

  // R11_scratch1 contains the size of the locals for frame resizing.
  // R12_scratch2 contains the top frame's lr.

  // Resizing the frame by the complete frame size prevents the TOC from
  // being overwritten by the locals. A way that would save more stack space
  // is to copy the TOC to its location in the new abi.
  __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size);
|
  // now, resize the frame
  __ resize_frame(R11_scratch1, pc_reg/*tmp*/);

  // In the case where we have resized a c2i frame above, the optional
  // alignment below the locals has size 32 (why?).
  __ std(R12_scratch2, _abi(lr), R1_SP);

  // Initialize initial_caller_sp.
  __ std(frame_size_reg/*old_sp*/, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);

#ifdef ASSERT
  // Make sure that there is at least one entry in the array.
  __ cmpdi(CCR0, number_of_frames_reg, 0);
  __ asm_assert_ne("array_size must be > 0", 0x205);
#endif

  // Now push the new interpreter frames.
  //
  __ bind(loop);
  // Allocate a new frame, fill in the pc.
  push_skeleton_frame(masm, deopt,
                      unroll_block_reg,
                      frame_sizes_reg,
                      number_of_frames_reg,
                      pcs_reg,
                      frame_size_reg,
                      pc_reg);
  __ cmpdi(CCR0, number_of_frames_reg, 0);
  __ bne(CCR0, loop);

  // Get the return address pointing into the frame manager.
  __ ld(R0, 0, pcs_reg);
  // Store it in the top interpreter frame.
  __ std(R0, _abi(lr), R1_SP);
  // Initialize frame_manager_lr of interpreter top frame.
  __ std(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
}
#endif

void SharedRuntime::generate_deopt_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("deopt_blob", 2048, 1024);
  InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
  Label exec_mode_initialized;
  int frame_size_in_words;
  OopMap* map = NULL;
  OopMapSet *oop_maps = new OopMapSet();

  // Size of ABI112 plus spill slots for R3_RET and F1_RET.
  const int frame_size_in_bytes = frame::abi_112_spill_size;
  const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.

  const Register exec_mode_reg = R21_tmp1;

  const address start = __ pc();

#ifdef COMPILER2
  // --------------------------------------------------------------------------
  // Prolog for non exception case!

  // We have been called from the deopt handler of the deoptee.
  //
  // deoptee:
  //                      ...
  //                      call X
  //                      ...
  //  deopt_handler:      call_deopt_stub
  //  cur. return pc  --> ...
  //
  // So currently SR_LR points behind the call in the deopt handler.
  // We adjust it such that it points to the start of the deopt handler.
  // The return_pc has been stored in the frame of the deoptee and
  // will replace the address of the deopt_handler in the call
  // to Deoptimization::fetch_unroll_info below.
  // We can't grab a free register here, because all registers may
  // contain live values, so let the RegisterSaver do the adjustment
  // of the return pc.
  const int return_pc_adjustment_no_exception = -size_deopt_handler();

  // Push the "unpack frame"
  // Save everything in sight.
  map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
                                                                 &first_frame_size_in_bytes,
                                                                 /*generate_oop_map=*/ true,
                                                                 return_pc_adjustment_no_exception,
                                                                 RegisterSaver::return_pc_is_lr);
  assert(map != NULL, "OopMap must have been created");

  __ li(exec_mode_reg, Deoptimization::Unpack_deopt);
  // Save exec mode for unpack_frames.
  __ b(exec_mode_initialized);

  // --------------------------------------------------------------------------
  // Prolog for exception case

  // An exception is pending.
  // We have been called with a return (interpreter) or a jump (exception blob).
  //
  // - R3_ARG1: exception oop
  // - R4_ARG2: exception pc

  int exception_offset = __ pc() - start;

  BLOCK_COMMENT("Prolog for exception case");

  // The RegisterSaver doesn't need to adjust the return pc for this situation.
  const int return_pc_adjustment_exception = 0;
|
  // Push the "unpack frame".
  // Save everything in sight.
  assert(R4 == R4_ARG2, "exception pc must be in r4");
  RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
                                                           &first_frame_size_in_bytes,
                                                           /*generate_oop_map=*/ false,
                                                           return_pc_adjustment_exception,
                                                           RegisterSaver::return_pc_is_r4);

  // Deopt during an exception. Save exec mode for unpack_frames.
  __ li(exec_mode_reg, Deoptimization::Unpack_exception);

  // Store exception oop and pc in thread (location known to GC).
  // This is needed since the call to "fetch_unroll_info()" may safepoint.
  __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  // fall through

  // --------------------------------------------------------------------------
  __ BIND(exec_mode_initialized);

  {
    const Register unroll_block_reg = R22_tmp2;

    // We need to set `last_Java_frame' because `fetch_unroll_info' will
    // call `last_Java_frame()'. The value of the pc in the frame is not
    // particularly important. It just needs to identify this blob.
    __ set_last_Java_frame(R1_SP, noreg);

    // With EscapeAnalysis turned on, this call may safepoint!
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread);
    address calls_return_pc = __ last_calls_return_pc();
    // Set an oopmap for the call site that describes all our saved registers.
    oop_maps->add_gc_map(calls_return_pc - start, map);

    __ reset_last_Java_frame();
    // Save the return value.
    __ mr(unroll_block_reg, R3_RET);

    // Restore only the result registers that have been saved
    // by save_volatile_registers(...).
    RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);

    // In excp_deopt_mode, restore and clear exception oop which we
    // stored in the thread during exception entry above. The exception
    // oop will be the return value of this stub.
    Label skip_restore_excp;
    __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
    __ bne(CCR0, skip_restore_excp);
    __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
    __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
    __ li(R0, 0);
    __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
    __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
    __ BIND(skip_restore_excp);

    // Reload narrow_oop_base.
2834 if (UseCompressedOops && Universe::narrow_oop_base() != 0) { |
|
2835 __ load_const_optimized(R30, Universe::narrow_oop_base()); |
|
2836 } |
|
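    // (R30 caches the compressed-oops heap base; only the result registers
    // were restored above, so the base register must be reloaded before any
    // oops are decoded.)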

    __ pop_frame();

    // stack: (deoptee, optional i2c, caller of deoptee, ...).

    // Pop the deoptee's frame.
    __ pop_frame();

    // stack: (caller_of_deoptee, ...).

    // Loop through the `UnrollBlock' info and create interpreter frames.
    push_skeleton_frames(masm, true/*deopt*/,
                         unroll_block_reg,
                         R23_tmp3,
                         R24_tmp4,
                         R25_tmp5,
                         R26_tmp6,
                         R27_tmp7);

    // stack: (skeletal interpreter frame, ..., optional skeletal
    // interpreter frame, optional c2i, caller of deoptee, ...).
  }

  // Push an `unpack_frame' taking care of float / int return values.
  __ push_frame(frame_size_in_bytes, R0/*tmp*/);

  // stack: (unpack frame, skeletal interpreter frame, ..., optional
  // skeletal interpreter frame, optional c2i, caller of deoptee,
  // ...).

  // Spill live volatile registers since we'll do a call.
  __ std( R3_RET, _abi_112_spill(spill_ret),  R1_SP);
  __ stfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
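  // Both the integer and the float return register are preserved: the C
  // call below may clobber them, and the deoptee's result must survive
  // into the interpreter.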

  // Let the unpacker lay out information in the skeletal frames just
  // allocated.
  __ get_PC_trash_LR(R3_RET);
  __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET);
  // This is a call to a LEAF method, so no oop map is required.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
                  R16_thread/*thread*/, exec_mode_reg/*exec_mode*/);
  __ reset_last_Java_frame();

  // Restore the volatiles saved above.
  __ ld( R3_RET, _abi_112_spill(spill_ret),  R1_SP);
  __ lfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);

  // Pop the unpack frame.
  __ pop_frame();
  __ restore_LR_CR(R0);

  // stack: (top interpreter frame, ..., optional interpreter frame,
  // optional c2i, caller of deoptee, ...).

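  // This port runs the C++ interpreter: R14_state points to the top
  // frame's cInterpreter state, R15_prev_state to that of its caller.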
  // Initialize R14_state.
  __ ld(R14_state, 0, R1_SP);
  __ addi(R14_state, R14_state,
          -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
  // Also initialize R15_prev_state.
  __ restore_prev_state();

  // Return to the interpreter entry point.
  __ blr();
  __ flush();
#else // COMPILER2
  __ unimplemented("deopt blob needed only with compiler");
  int exception_offset = __ pc() - start;
#endif // COMPILER2

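  // Note: the arguments 0, exception_offset, 0 below are the blob's unpack,
  // unpack-with-exception, and unpack-with-reexecution entry offsets;
  // reexecution reuses the regular unpack entry here.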
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 0, first_frame_size_in_bytes / wordSize);
}

#ifdef COMPILER2
void SharedRuntime::generate_uncommon_trap_blob() {
  // Allocate space for the code.
  ResourceMark rm;
  // Set up code generation tools.
  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
  address start = __ pc();

  Register unroll_block_reg = R21_tmp1;
  Register klass_index_reg  = R22_tmp2;
  Register unc_trap_reg     = R23_tmp3;

  OopMapSet* oop_maps = new OopMapSet();
  int frame_size_in_bytes = frame::abi_112_size;
  OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

  // stack: (deoptee, optional i2c, caller_of_deoptee, ...).

  // Push a dummy `unpack_frame' and call
  // `Deoptimization::uncommon_trap' to pack the compiled frame into a
  // vframe array and return the `UnrollBlock' information.

  // Save LR to compiled frame.
  __ save_LR_CR(R11_scratch1);

  // Push an "uncommon_trap" frame.
  __ push_frame_abi112(0, R11_scratch1);

  // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).

  // Set the `unpack_frame' as last_Java_frame.
  // `Deoptimization::uncommon_trap' expects it and considers its
  // sender frame as the deoptee frame.
  // Remember the offset of the instruction whose address will be
  // moved to R11_scratch1.
  address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);

  __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

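  // R3 carries the trap request (named klass index here) that the compiled
  // code supplies at the trap site; it is forwarded unchanged to
  // Deoptimization::uncommon_trap.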
  __ mr(klass_index_reg, R3);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
                  R16_thread, klass_index_reg);

  // Set an oopmap for the call site.
  oop_maps->add_gc_map(gc_map_pc - start, map);

  __ reset_last_Java_frame();

  // Pop the `unpack frame'.
  __ pop_frame();

  // stack: (deoptee, optional i2c, caller_of_deoptee, ...).

  // Save the return value.
  __ mr(unroll_block_reg, R3_RET);

  // Pop the deoptee's frame.
  __ pop_frame();

  // stack: (caller_of_deoptee, ...).

  // Allocate new interpreter frame(s) and possibly a c2i adapter
  // frame.
  push_skeleton_frames(masm, false/*deopt*/,
                       unroll_block_reg,
                       R22_tmp2,
                       R23_tmp3,
                       R24_tmp4,
                       R25_tmp5,
                       R26_tmp6);

  // stack: (skeletal interpreter frame, ..., optional skeletal
  // interpreter frame, optional c2i, caller of deoptee, ...).

  // Push a dummy `unpack_frame' taking care of float return values.
  // Call `Deoptimization::unpack_frames' to lay out information in the
  // interpreter frames just created.

  // Push a simple "unpack frame" here.
  __ push_frame_abi112(0, R11_scratch1);

  // stack: (unpack frame, skeletal interpreter frame, ..., optional
  // skeletal interpreter frame, optional c2i, caller of deoptee,
  // ...).

  // Set the "unpack_frame" as last_Java_frame.
  __ get_PC_trash_LR(R11_scratch1);
  __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

  // Indicate it is the uncommon trap case.
  __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
  // Let the unpacker lay out information in the skeletal frames just
  // allocated.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
                  R16_thread, unc_trap_reg);

  __ reset_last_Java_frame();
  // Pop the `unpack frame'.
  __ pop_frame();
  // Restore LR from top interpreter frame.
  __ restore_LR_CR(R11_scratch1);

  // stack: (top interpreter frame, ..., optional interpreter frame,
  // optional c2i, caller of deoptee, ...).

  // Initialize R14_state, ...
  __ ld(R11_scratch1, 0, R1_SP);
  __ addi(R14_state, R11_scratch1,
          -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
  // Also initialize R15_prev_state.
  __ restore_prev_state();
  // Return to the interpreter entry point.
  __ blr();

  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
}
#endif // COMPILER2

// Generate a special Compile2Runtime blob that saves all registers and sets
// up the oopmap.
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert(StubRoutines::forward_exception_entry() != NULL,
         "must be generated before");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code. Set up code generation tools.
  CodeBuffer buffer("handler_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start = __ pc();
  int frame_size_in_bytes = 0;

  RegisterSaver::ReturnPCLocation return_pc_location;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  if (cause_return) {
    // Nothing to do here. The frame has already been popped in MachEpilogNode.
    // Register LR already contains the return pc.
    return_pc_location = RegisterSaver::return_pc_is_lr;
  } else {
    // Use thread()->saved_exception_pc() as return pc.
    return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
  }

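  // For a poll that is not taken at a return, the signal handler has saved
  // the pc of the faulting poll instruction in thread->saved_exception_pc();
  // the register saver uses it as the return pc of the frame it builds.
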
  // Save registers, fpu state, and flags.
  map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
                                                                 &frame_size_in_bytes,
                                                                 /*generate_oop_map=*/ true,
                                                                 /*return_pc_adjustment=*/0,
                                                                 return_pc_location);

  // The following is basically a call_VM. However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.
  __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);

  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.

  // Do the call.
  __ call_VM_leaf(call_ptr, R16_thread);
  address calls_return_pc = __ last_calls_return_pc();

  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.
  oop_maps->add_gc_map(calls_return_pc - start, map);

  Label noException;

  // Clear the last Java frame.
  __ reset_last_Java_frame();

  BLOCK_COMMENT(" Check pending exception.");
  const Register pending_exception = R0;
  __ ld(pending_exception, thread_(pending_exception));
  __ cmpdi(CCR0, pending_exception, 0);
  __ beq(CCR0, noException);

  // Exception pending.
  RegisterSaver::restore_live_registers_and_pop_frame(masm,
                                                      frame_size_in_bytes,
                                                      /*restore_ctr=*/true);

  BLOCK_COMMENT(" Jump to forward_exception_entry.");
  // Jump to forward_exception_entry, with the issuing PC in LR
  // so it looks like the original nmethod called forward_exception_entry.
  __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);

  // No exception case.
  __ BIND(noException);

  // Normal exit, restore registers and exit.
  RegisterSaver::restore_live_registers_and_pop_frame(masm,
                                                      frame_size_in_bytes,
                                                      /*restore_ctr=*/true);

  __ blr();

  // Make sure all code is generated.
  masm->flush();

  // Fill out other meta info.
  // CodeBlob frame size is in words.
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
}

// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
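// Typical destinations passed in by the caller are
// SharedRuntime::resolve_static_call_C, resolve_virtual_call_C,
// resolve_opt_virtual_call_C, and handle_wrong_method (ic-miss); each
// returns the code entry point to continue at.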
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {

  // Allocate space for the code.
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_bytes;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  address start = __ pc();

  map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
                                                                 &frame_size_in_bytes,
                                                                 /*generate_oop_map*/ true,
                                                                 /*return_pc_adjustment*/ 0,
                                                                 RegisterSaver::return_pc_is_lr);

  // Use noreg as last_Java_pc, the return pc will be reconstructed
  // from the physical frame.
  __ set_last_Java_frame(/*sp*/R1_SP, noreg);

  int frame_complete = __ offset();

  // Pass R19_method as 2nd (optional) argument, used by
  // counter_overflow_stub.
  __ call_VM_leaf(destination, R16_thread, R19_method);
  address calls_return_pc = __ last_calls_return_pc();
  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.
  // Create the oopmap for the call's return pc.
  oop_maps->add_gc_map(calls_return_pc - start, map);

  // R3_RET contains the address we are going to jump to assuming no exception got installed.

  // Clear last_Java_sp.
  __ reset_last_Java_frame();

  // Check for pending exceptions.
  BLOCK_COMMENT("Check for pending exceptions.");
  Label pending;
  __ ld(R11_scratch1, thread_(pending_exception));
  __ cmpdi(CCR0, R11_scratch1, 0);
  __ bne(CCR0, pending);

  __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.

  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);

  // Get the returned method.
  __ get_vm_result_2(R19_method);
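  // The VM call stored the resolved method in thread->vm_result_2;
  // get_vm_result_2 loads it into R19_method, where subsequent code
  // expects the current method.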

  __ bctr();

  // Pending exception after the safepoint.
  __ BIND(pending);

  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);

  // Exception pending => remove activation and forward to exception handler.

  __ li(R11_scratch1, 0);
  __ ld(R3_ARG1, thread_(pending_exception));
  __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread);
  __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);

  // -------------
  // Make sure all code is generated.
  masm->flush();

  // Return the blob.
  // CodeBlob frame size is in words.
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
                                       oop_maps, true);
}