/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#if defined(COMPILER2) || INCLUDE_JVMCI
#include "adfiles/ad_aarch64.hpp"
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    // we don't expect any arg reg save area so aarch64 asserts that
    // frame::arg_reg_save_area_bytes == 0
    rbp_off = 0,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};

// FIXME -- this is used by C1
class RegisterSaver {
 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int r0_offset_in_bytes(void)        { return (32 + r0->encoding()) * wordSize; }
  static int reg_offset_in_bytes(Register r) { return r0_offset_in_bytes() + r->encoding() * wordSize; }
  static int rmethod_offset_in_bytes(void)   { return reg_offset_in_bytes(rmethod); }
  static int rscratch1_offset_in_bytes(void) { return (32 + rscratch1->encoding()) * wordSize; }
  static int v0_offset_in_bytes(void)        { return 0; }
  static int return_offset_in_bytes(void)    { return (32 /* floats */ + 31 /* gregs */) * wordSize; }
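
  // For example: with the 32 8-byte float slots at the bottom of the save
  // area (v0 at byte 0), r0 lands at byte offset (32 + 0) * wordSize == 256
  // and the saved return address at (32 + 31) * wordSize == 504.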

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);

  // Capture info about frame layout
  enum layout {
    fpu_state_off = 0,
    fpu_state_end = fpu_state_off + FPUStateSizeInWords - 1,
    // The frame sender code expects that rfp will be in
    // the "natural" place and will override any oopMap
    // setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    r0_off = fpu_state_off + FPUStateSizeInWords,
    rfp_off = r0_off + 30 * 2,
    return_off = rfp_off + 2,      // slot for return address
    reg_save_size = return_off + 2};

};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
#if defined(COMPILER2) || INCLUDE_JVMCI
  if (save_vectors) {
    // Save upper half of vector registers
    int vect_words = 32 * 8 / wordSize;
    additional_frame_words += vect_words;
  }
#else
  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
                                     reg_save_size*BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.

  __ enter();
  __ push_CPU_state(save_vectors);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
    Register r = as_Register(i);
    if (r < rheapbase && r != rscratch1 && r != rscratch2) {
      int sp_offset = 2 * (i + 32); // SP offsets are in 4-byte words;
                                    // register slots are 8 bytes wide and
                                    // the 32 floating-point registers come first
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset + additional_frame_slots),
                                r->as_VMReg());
    }
  }

  for (int i = 0; i < FloatRegisterImpl::number_of_registers; i++) {
    FloatRegister r = as_FloatRegister(i);
    int sp_offset = save_vectors ? (4 * i) : (2 * i);
    oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                              r->as_VMReg());
  }

  return oop_map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
#ifndef COMPILER2
  assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
#endif
  __ pop_CPU_state(restore_vectors);
  __ leave();
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result register. Only used by deoptimization. By
  // now any callee save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  // Restore fp result register
  __ ldrd(v0, Address(sp, v0_offset_in_bytes()));
  // Restore integer result register
  __ ldr(r0, Address(sp, r0_offset_in_bytes()));

  // Pop all of the register save area off the stack
  __ add(sp, sp, round_to(return_offset_in_bytes(), 16));
}

// Is vector's size (in bytes) bigger than a size saved by default?
// 8-byte vector registers are saved by default on AArch64.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 8;
}
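
// Hence a full 16-byte NEON register counts as wide: only its low 8 bytes
// are saved unless save_vectors is passed to RegisterSaver::save_live_registers.
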
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}
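
// e.g. the first incoming stack argument (reg2stack() == 0) lives at byte
// offset (0 + 4) * 4 == 16, just past the two saved 8-byte slots for rfp and lr.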

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

template <class T> static const T& min (const T& a, const T& b) {
  return (a > b) ? b : a;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values up to RegisterImpl::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words,
// which are 64-bit.  The OUTPUTS are in 32-bit units.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni
// methods with small numbers of arguments without having to shuffle
// the arguments at all. Since we control the java ABI we ought to at
// least get some advantage out of it.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5, j_rarg6, j_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return round_to(stk_args, 2);
}
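
// Example: for a (JI)V signature, sig_bt is { T_LONG, T_VOID, T_INT }; the
// loop above yields regs[0].set2(j_rarg0), regs[1] marked BAD (the long's
// second half), regs[2].set1(j_rarg1), and the function returns 0 stack slots.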

// Patch the callers callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
  __ cbz(rscratch1, L);

  __ enter();
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif

  __ mov(c_rarg0, rmethod);
  __ mov(c_rarg1, lr);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ blrt(rscratch1, 2, 0, 0);
  __ maybe_isb();

  __ pop_CPU_state();
  // restore sp
  __ leave();
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  int words_pushed = 0;

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.

  int extraspace = total_args_passed * Interpreter::stackElementSize;

  __ mov(r13, sp);

  // stack is aligned, keep it that way
  extraspace = round_to(extraspace, 2*wordSize);

  if (extraspace)
    __ sub(sp, sp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i - 1) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a long/double
    // in a single slot on a 64-bit VM and it would be silly to break it up,
    // the interpreter leaves one slot empty and only stores to a single slot.
    // In this case the slot that is occupied is the T_VOID slot. See, I said
    // it was confusing.
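    // Concretely: a T_LONG with st_off == 32 is stored once, at next_off == 24
    // (its T_VOID slot); slot 32 is left unused (filled with junk in debug builds).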

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rscratch1
      int ld_off = (r_1->reg2stack() * VMRegImpl::stack_slot_size
                    + extraspace
                    + words_pushed * wordSize);
      if (!r_2->is_valid()) {
        // sign extend??
        __ ldrw(rscratch1, Address(sp, ld_off));
        __ str(rscratch1, Address(sp, st_off));

      } else {

        __ ldr(rscratch1, Address(sp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ str(rscratch1, Address(sp, next_off));
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, 0xdeadffffdeadaaaaul);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        } else {
          __ str(rscratch1, Address(sp, st_off));
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to slot
        // why not sign extend??
        __ str(r, Address(sp, st_off));
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov(rscratch1, 0xdeadffffdeadaaabul);
          __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
          __ str(r, Address(sp, next_off));
        } else {
          __ str(r, Address(sp, st_off));
        }
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        // only a float, use just part of the slot
        __ strs(r_1->as_FloatRegister(), Address(sp, st_off));
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov(rscratch1, 0xdeadffffdeadaaacul);
        __ str(rscratch1, Address(sp, st_off));
#endif /* ASSERT */
        __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
      }
    }
  }

  __ mov(esp, sp); // Interp expects args on caller's expression stack

  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::interpreter_entry_offset())));
  __ br(rscratch1);
}


void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.

  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes.

  // Adapters are frameless.

  // An i2c adapter is frameless because the *caller* frame, which is
  // interpreted, routinely repairs its own esp (from
  // interpreter_frame_last_sp), even if a callee has modified the
  // stack pointer.  It also recalculates and aligns sp.

  // A c2i adapter is frameless because the *callee* frame, which is
  // interpreted, routinely repairs its caller's sp (from sender_sp,
  // which is set up via the senderSP register).

  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.

  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
#if 0
    // So, let's test for cascading c2i/i2c adapters right now.
    // assert(Interpreter::contains($return_addr) ||
    //        StubRoutines::contains($return_addr),
    //        "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
#endif
  }

  // Cut-out for having no stack args.
  int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
  if (comp_args_on_stack) {
    __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
    __ andr(sp, rscratch1, -16);
  }

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    Label no_alternative_target;
    __ cbz(rscratch2, no_alternative_target);
    __ mov(rscratch1, rscratch2);
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i - 1)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
      if (!r_2->is_valid()) {
        // sign extend???
        __ ldrsw(rscratch2, Address(esp, ld_off));
        __ str(rscratch2, Address(sp, st_off));
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ ldr(rscratch2, Address(esp, offset));
        // st_off is LSW (i.e. reg.first())
        __ str(rscratch2, Address(sp, st_off));
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT,
        // T_ADDRESS, T_LONG, or T_DOUBLE; the interpreter allocates
        // two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the
        // interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ ldr(r, Address(esp, offset));
      } else {
        // sign extend and use a full word?
        __ ldrw(r, Address(esp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ ldrs(r_1->as_FloatRegister(), Address(esp, ld_off));
      } else {
        __ ldrd(r_1->as_FloatRegister(), Address(esp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.

  __ str(rmethod, Address(rthread, JavaThread::callee_target_offset()));

  __ br(rscratch1);
}
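
// e.g. with comp_args_on_stack == 3 (three 4-byte out slots) the adapter
// reserves round_to(12, 8) >> LogBytesPerWord == 2 words and then re-aligns
// sp down to a 16-byte boundary before the tail jump above.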

#ifdef BUILTIN_SIM
static void generate_i2c_adapter_name(char *result, int total_args_passed, const BasicType *sig_bt)
{
  strcpy(result, "i2c(");
  int idx = 4;
  for (int i = 0; i < total_args_passed; i++) {
    switch(sig_bt[i]) {
    case T_BOOLEAN:
      result[idx++] = 'Z';
      break;
    case T_CHAR:
      result[idx++] = 'C';
      break;
    case T_FLOAT:
      result[idx++] = 'F';
      break;
    case T_DOUBLE:
      assert((i < (total_args_passed - 1)) && (sig_bt[i+1] == T_VOID),
             "double must be followed by void");
      i++;
      result[idx++] = 'D';
      break;
    case T_BYTE:
      result[idx++] = 'B';
      break;
    case T_SHORT:
      result[idx++] = 'S';
      break;
    case T_INT:
      result[idx++] = 'I';
      break;
    case T_LONG:
      assert((i < (total_args_passed - 1)) && (sig_bt[i+1] == T_VOID),
             "long must be followed by void");
      i++;
      result[idx++] = 'L';
      break;
    case T_OBJECT:
      result[idx++] = 'O';
      break;
    case T_ARRAY:
      result[idx++] = '[';
      break;
    case T_ADDRESS:
      result[idx++] = 'P';
      break;
    case T_NARROWOOP:
      result[idx++] = 'N';
      break;
    case T_METADATA:
      result[idx++] = 'M';
      break;
    case T_NARROWKLASS:
      result[idx++] = 'K';
      break;
    default:
      result[idx++] = '?';
      break;
    }
  }
  result[idx++] = ')';
  result[idx] = '\0';
}
#endif

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();
#ifdef BUILTIN_SIM
  char *name = NULL;
  AArch64Simulator *sim = NULL;
  size_t len = 65536;
  if (NotifySimulator) {
    name = NEW_C_HEAP_ARRAY(char, len, mtInternal);
  }

  if (name) {
    generate_i2c_adapter_name(name, total_args_passed, sig_bt);
    sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
    sim->notifyCompile(name, i2c_entry);
  }
#endif
  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Label ok;

  Register holder = rscratch2;
  Register receiver = j_rarg0;
  Register tmp = r10;  // A call-clobbered register not used for arg passing

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rmethod holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  {
    __ block_comment("c2i_unverified_entry {");
    __ load_klass(rscratch1, receiver);
    __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ cmp(rscratch1, tmp);
    __ ldr(rmethod, Address(holder, CompiledICHolder::holder_method_offset()));
    __ br(Assembler::EQ, ok);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
    __ cbz(rscratch1, skip_fixup);
    __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
    __ block_comment("} c2i_unverified_entry");
  }

  address c2i_entry = __ pc();

#ifdef BUILTIN_SIM
  if (name) {
    name[0] = 'c';
    name[2] = 'i';
    sim->notifyCompile(name, c2i_entry);
    FREE_C_HEAP_ARRAY(char, name, mtInternal);
  }
#endif

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on AArch64");

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return stk_args;
}
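
// e.g. a native (jint, jlong, jfloat) argument list maps to c_rarg0 (set1),
// c_rarg1 (set2, for the long and its T_VOID half) and c_farg0, with no
// stack slots used (stk_args == 0).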

// On 64 bit we will store integer like items to the stack as
// 64 bit items (sparc abi) even though java would only store
// 32 bits for a parameter.  On 32 bit it will simply be 32 bits.
// So this routine will do 32->32 on 32 bit and 32->64 on 64 bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is NULL; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    __ lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmp(rscratch1, zr);
    __ csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmp(rOop, zr);
    __ lea(rHandle, Address(sp, offset));
    // conditionally move a NULL
    __ csel(rHandle, zr, rHandle, Assembler::EQ);
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}
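
// So a receiver arriving in j_rarg0 is stored into the first reserved handle
// slot at oop_handle_offset, and rHandle ends up pointing at that slot, or is
// NULL when the oop itself was NULL.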

// A float arg may have to do float reg int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      __ fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      ShouldNotReachHere();
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
      __ str(rscratch1, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      __ ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      __ mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}


// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      __ fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      ShouldNotReachHere();
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ strs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ strd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ str(r0, Address(rfp, -wordSize));
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ ldrs(v0, Address(rfp, -wordSize));
    break;
  case T_DOUBLE:
    __ ldrd(v0, Address(rfp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ ldr(r0, Address(rfp, -wordSize));
    }
  }
}
static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else if (args[i].first()->is_FloatRegister()) {
      __ strd(args[i].first()->as_FloatRegister(), Address(__ pre(sp, -2 * wordSize)));
    }
  }
  __ push(x, sp);
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  RegSet x;
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      x = x + args[i].first()->as_Register();
    } else {
      ;
    }
  }
  __ pop(x, sp);
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      ;
    } else if (args[i].first()->is_FloatRegister()) {
      __ ldrd(args[i].first()->as_FloatRegister(), Address(__ post(sp, 2 * wordSize)));
    }
  }
}


// Check GC_locker::needs_gc and enter the runtime if it's true.  This
// keeps a new JNI critical region from starting until a GC has been
// forced.  Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) { Unimplemented(); }

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }


class ComputeMoveOrder: public StackObj {
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMRegPair _src;
    VMRegPair _dst;
    int _src_index;
    int _dst_index;
    bool _processed;
    MoveOperation* _next;
    MoveOperation* _prev;

    static int get_id(VMRegPair r) { Unimplemented(); return 0; }

   public:
    MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
      _src(src)
    , _src_index(src_index)
    , _dst(dst)
    , _dst_index(dst_index)
    , _next(NULL)
    , _prev(NULL)
    , _processed(false) { Unimplemented(); }

    VMRegPair src() const              { Unimplemented(); return _src; }
    int src_id() const                 { Unimplemented(); return 0; }
    int src_index() const              { Unimplemented(); return 0; }
    VMRegPair dst() const              { Unimplemented(); return _dst; }
    void set_dst(int i, VMRegPair dst) { Unimplemented(); }
    int dst_index() const              { Unimplemented(); return 0; }
    int dst_id() const                 { Unimplemented(); return 0; }
    MoveOperation* next() const        { Unimplemented(); return 0; }
    MoveOperation* prev() const        { Unimplemented(); return 0; }
    void set_processed()               { Unimplemented(); }
    bool is_processed() const          { Unimplemented(); return 0; }

    // insert
    void break_cycle(VMRegPair temp_register) { Unimplemented(); }

    void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
  };

 private:
  GrowableArray<MoveOperation*> edges;

 public:
  ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
                   BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }

  // Collected all the move operations
  void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }

  // Walk the edges breaking cycles between moves.  The result list
  // can be walked in order to produce the proper set of loads
  GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
};


static void rt_call(MacroAssembler* masm, address dest, int gpargs, int fpargs, int type) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    assert((unsigned)gpargs < 256, "eek!");
    assert((unsigned)fpargs < 32, "eek!");
    __ lea(rscratch1, RuntimeAddress(dest));
    if (UseBuiltinSim) __ mov(rscratch2, (gpargs << 6) | (fpargs << 2) | type);
    __ blrt(rscratch1, rscratch2);
    __ maybe_isb();
  }
}
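
// i.e. destinations inside the code cache are reached with a direct (far)
// call, while VM entry points outside it go through rscratch1 via blrt,
// with rscratch2 describing the argument profile for the builtin simulator.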

static void verify_oop_args(MacroAssembler* masm,
                            methodHandle method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = r19;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ldr(temp_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 methodHandle method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool     has_receiver   = false;
  Register receiver_reg   = noreg;
  int      member_arg_pos = -1;
  Register member_reg     = noreg;
  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = r19;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal("unexpected intrinsic id %d", iid);
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ldr(member_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note:  This assumes that compiled calling conventions always
      // pass the receiver oop in a register.  If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = r2;  // known to be free at this point
      __ ldr(receiver_reg, Address(sp, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}
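
// e.g. for the linkToStatic intrinsic the trailing MemberName argument is
// materialized in r19; for invokeBasic only the receiver (already in its
// compiled-convention register) is picked up before the dispatch.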
|
1263 |
||
1264 |
// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
//
// Critical native functions are a shorthand for the use of
// GetPrimitiveArrayCritical and disallow the use of any other JNI
// functions.  The wrapper is expected to unpack the arguments before
// passing them to the callee and perform checks before and after the
// native call to ensure that the GC_locker
// lock_critical/unlock_critical semantics are followed.  Some other
// parts of JNI setup are skipped, like the tear-down of the JNI handle
// block and the check for pending exceptions, since it is impossible
// for them to be thrown.
//
// They are roughly structured like this:
//    if (GC_locker::needs_gc())
//      SharedRuntime::block_for_jni_critical();
//    transition to thread_in_native
//    unpack array arguments and call native entry point
//    check for safepoint in progress
//    check if any thread suspend flags are set
//    call into JVM and possibly unlock the JNI critical,
//    if a GC was suppressed while in the critical native
//    transition back to thread_in_Java
//    return to caller
//
nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                const methodHandle& method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
#ifdef BUILTIN_SIM
  if (NotifySimulator) {
    // Names are up to 65536 chars long.  UTF8-coded strings are up to
    // 3 bytes per character.  We concatenate three such strings.
    // Yes, I know this is ridiculous, but it's debug code and glibc
    // allocates large arrays very efficiently.
    size_t len = (65536 * 3) * 3;
    char *name = new char[len];

    strncpy(name, method()->method_holder()->name()->as_utf8(), len);
    strncat(name, ".", len);
    strncat(name, method()->name()->as_utf8(), len);
    strncat(name, method()->signature()->as_utf8(), len);
    AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck)->notifyCompile(name, __ pc());
    delete[] name;
  }
#endif

  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;

    // First instruction must be a nop as it may need to be patched on deoptimisation
    __ nop();
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();
  intptr_t start = (intptr_t)__ pc();

  // We have received a description of where all the java args are located
  // on entry to the wrapper.  We need to convert these args to where
  // the jni function will expect them.  To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method).

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args++;
      }
    }
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
  BasicType* in_elem_bt = NULL;

  int argc = 0;
  if (!is_critical_native) {
    out_sig_bt[argc++] = T_ADDRESS;
    if (method->is_static()) {
      out_sig_bt[argc++] = T_OBJECT;
    }

    for (int i = 0; i < total_in_args ; i++ ) {
      out_sig_bt[argc++] = in_sig_bt[i];
    }
  } else {
    Thread* THREAD = Thread::current();
    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
    SignatureStream ss(method->signature());
    for (int i = 0; i < total_in_args ; i++ ) {
      if (in_sig_bt[i] == T_ARRAY) {
        // Arrays are passed as int, elem* pair
        out_sig_bt[argc++] = T_INT;
        out_sig_bt[argc++] = T_ADDRESS;
        Symbol* atype = ss.as_symbol(CHECK_NULL);
        const char* at = atype->as_C_string();
        if (strlen(at) == 2) {
          assert(at[0] == '[', "must be");
          switch (at[1]) {
            case 'B': in_elem_bt[i] = T_BYTE;    break;
            case 'C': in_elem_bt[i] = T_CHAR;    break;
            case 'D': in_elem_bt[i] = T_DOUBLE;  break;
            case 'F': in_elem_bt[i] = T_FLOAT;   break;
            case 'I': in_elem_bt[i] = T_INT;     break;
            case 'J': in_elem_bt[i] = T_LONG;    break;
            case 'S': in_elem_bt[i] = T_SHORT;   break;
            case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
            default: ShouldNotReachHere();
          }
        }
      } else {
        out_sig_bt[argc++] = in_sig_bt[i];
        in_elem_bt[i] = T_VOID;
      }
      if (in_sig_bt[i] != T_VOID) {
        assert(in_sig_bt[i] == ss.type(), "must match");
        ss.next();
      }
    }
  }

  // Now figure out where the args must be stored and how much stack space
  // they require.
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // incoming registers.

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area
  int total_save_slots = 8 * VMRegImpl::slots_per_word;  // 8 arguments passed in registers
  if (is_critical_native) {
    // Critical natives may have to call out so they need a save area
    // for register arguments.
    int double_slots = 0;
    int single_slots = 0;
    for (int i = 0; i < total_in_args; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        switch (in_sig_bt[i]) {
          case T_BOOLEAN:
          case T_BYTE:
          case T_SHORT:
          case T_CHAR:
          case T_INT:  single_slots++; break;
          case T_ARRAY:  // specific to LP64 (7145024)
          case T_LONG: double_slots++; break;
          default:  ShouldNotReachHere();
        }
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      }
    }
    total_save_slots = double_slots * 2 + single_slots;
    // align the save area
    if (double_slots != 0) {
      stack_slots = round_to(stack_slots, 2);
    }
  }

  int oop_handle_offset = stack_slots;
  stack_slots += total_save_slots;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place (+2) to save return values or temp during shuffling
  // + 4 for return address (which we own) and saved rfp
  stack_slots += 6;

  // OK, the space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset (8 java arg registers)
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //

  // Now compute actual number of stack words we need rounding to make
  // stack properly aligned.
  stack_slots = round_to(stack_slots, StackAlignmentInSlots);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
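  // With the 16-byte AArch64 stack alignment and 4-byte stack slots,
  // StackAlignmentInSlots is 4, so stack_slots is rounded up to the next
  // multiple of 4 and stack_size lands on a 16-byte boundary.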

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except rfp.  rfp is the only callee save register
  // as far as the interpreter and the compiler(s) are concerned.


  const Register ic_reg = rscratch2;
  const Register receiver = j_rarg0;

  Label hit;
  Label exception_pending;

  assert_different_registers(ic_reg, receiver, rscratch1);
  __ verify_oop(receiver);
  __ cmp_klass(receiver, ic_reg, rscratch1);
  __ br(Assembler::EQ, hit);

  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // Verified entry point must be aligned
  __ align(8);

  __ bind(hit);

  int vep_offset = ((intptr_t)__ pc()) - start;

  // If we have to make this method not-entrant we'll overwrite its
  // first instruction with a jump.  For this action to be legal we
  // must ensure that this first instruction is a B, BL, NOP, BKPT,
  // SVC, HVC, or SMC.  Make it a NOP.
  __ nop();

  // Generate stack overflow check
  if (UseStackBanging) {
    __ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
  } else {
    Unimplemented();
  }

  // Generate a new frame for the wrapper.
  __ enter();
  // -2 because return address is already present and so is saved rfp
  __ sub(sp, sp, stack_size - 2*wordSize);

  // Frame is now completed as far as size and linkage.
  int frame_complete = ((intptr_t)__ pc()) - start;

  // record entry into native wrapper code
  if (NotifySimulator) {
    __ notify(Assembler::method_entry);
  }

  // We use r20 as the oop handle for the receiver/klass.
  // It is callee save so it survives the call to native.

  const Register oop_handle_reg = r20;

  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  //
  // We immediately shuffle the arguments so that for any vm call we have to
  // make from here on out (sync slow path, jvmti, etc.) we will have
  // captured the oops from our caller and have a valid oopMap for
  // them.

  // -----------------
  // The Grand Shuffle

  // The Java calling convention is either equal (linux) or denser (win64) than the
  // c calling convention.  However, because of the jni_env argument, the c calling
  // convention always has at least one more (and two for static) arguments than Java.
  // Therefore if we move the args from java -> c backwards then we will never have
  // a register->register conflict and we don't have to build a dependency graph
  // and figure out how to break any cycles.
  //
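  // On AArch64 the Java argument registers are shifted by one relative
  // to the C ones (j_rarg0 == c_rarg1, ..., j_rarg7 == c_rarg0), so for
  // a non-static JNI call most of these moves are no-ops, and moving the
  // last argument first never overwrites a source that is still needed.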

  // Record esp-based slot for receiver on stack for non-static methods
  int receiver_offset = -1;

  // This is a trick.  We double the stack slots so we can claim
  // the oops in the caller's frame.  Since we are sure to have
  // more args than the caller doubling is enough to make
  // sure we can capture all the incoming oop args from the
  // caller.
  //
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);

  // Mark location of rfp (someday)
  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rfp));


  int float_args = 0;
  int int_args = 0;

#ifdef ASSERT
  bool reg_destroyed[RegisterImpl::number_of_registers];
  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
    reg_destroyed[r] = false;
  }
  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
    freg_destroyed[f] = false;
  }

#endif /* ASSERT */

  // This may iterate in two different directions depending on the
  // kind of native it is.  The reason is that for regular JNI natives
  // the incoming and outgoing registers are offset upwards and for
  // critical natives they are offset down.
  GrowableArray<int> arg_order(2 * total_in_args);
  VMRegPair tmp_vmreg;
  tmp_vmreg.set1(r19->as_VMReg());

  if (!is_critical_native) {
    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
      arg_order.push(i);
      arg_order.push(c_arg);
    }
  } else {
    // Compute a valid move order, using tmp_vmreg to break any cycles
    ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
  }

  int temploc = -1;
  for (int ai = 0; ai < arg_order.length(); ai += 2) {
    int i = arg_order.at(ai);
    int c_arg = arg_order.at(ai + 1);
    __ block_comment(err_msg("move %d -> %d", i, c_arg));
    if (c_arg == -1) {
      assert(is_critical_native, "should only be required for critical natives");
      // This arg needs to be moved to a temporary
      __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
      in_regs[i] = tmp_vmreg;
      temploc = i;
      continue;
    } else if (i == -1) {
      assert(is_critical_native, "should only be required for critical natives");
      // Read from the temporary location
      assert(temploc != -1, "must be valid");
      i = temploc;
      temploc = -1;
    }
#ifdef ASSERT
    if (in_regs[i].first()->is_Register()) {
      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
    } else if (in_regs[i].first()->is_FloatRegister()) {
      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
    }
    if (out_regs[c_arg].first()->is_Register()) {
      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
    }
#endif /* ASSERT */
    switch (in_sig_bt[i]) {
      case T_ARRAY:
        if (is_critical_native) {
          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
          c_arg++;
#ifdef ASSERT
          if (out_regs[c_arg].first()->is_Register()) {
            reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
          } else if (out_regs[c_arg].first()->is_FloatRegister()) {
            freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
          }
#endif
          int_args++;
          break;
        }
      case T_OBJECT:
        assert(!is_critical_native, "no oop arguments");
        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
                    ((i == 0) && (!is_static)),
                    &receiver_offset);
        int_args++;
        break;
      case T_VOID:
        break;

      case T_FLOAT:
        float_move(masm, in_regs[i], out_regs[c_arg]);
        float_args++;
        break;

      case T_DOUBLE:
        assert( i + 1 < total_in_args &&
                in_sig_bt[i + 1] == T_VOID &&
                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
        double_move(masm, in_regs[i], out_regs[c_arg]);
        float_args++;
        break;

      case T_LONG :
        long_move(masm, in_regs[i], out_regs[c_arg]);
        int_args++;
        break;

      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");

      default:
        move32_64(masm, in_regs[i], out_regs[c_arg]);
        int_args++;
    }
  }

  // point c_arg at the first arg that is already loaded in case we
  // need to spill before we call out
  int c_arg = total_c_args - total_in_args;

  // Pre-load a static method's oop into c_rarg1.
  if (method->is_static() && !is_critical_native) {

    //  load oop into a register
    __ movoop(c_rarg1,
              JNIHandles::make_local(method->method_holder()->java_mirror()),
              /*immediate*/true);

    // Now handlize the static class mirror; it's known not-null.
    __ str(c_rarg1, Address(sp, klass_offset));
    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));

    // Now get the handle
    __ lea(c_rarg1, Address(sp, klass_offset));
    // and protect the arg if we must spill
    c_arg--;
  }

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal).  It is enough that the pc()
  // points into the right code segment.  It does not have to be the correct return pc.
  // We use the same pc/oopMap repeatedly when we call out.

  intptr_t the_pc = (intptr_t) __ pc();
  oop_maps->add_gc_map(the_pc - start, map);

  __ set_last_Java_frame(sp, noreg, (address)the_pc, rscratch1);

  Label dtrace_method_entry, dtrace_method_entry_done;
  {
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
    __ ldrb(rscratch1, Address(rscratch1, offset));
    __ cbnzw(rscratch1, dtrace_method_entry);
    __ bind(dtrace_method_entry_done);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, c_rarg1);
    restore_args(masm, total_c_args, c_arg, out_regs);
  }

  // Lock a synchronized method

  // Register definitions used by locking and unlocking

  const Register swap_reg = r0;
  const Register obj_reg  = r19;  // Will contain the oop
  const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
  const Register old_hdr  = r13;  // value of old header at unlock time
  const Register tmp = lr;

  Label slow_path_lock;
  Label lock_done;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");

    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ mov(oop_handle_reg, c_rarg1);

    // Get address of the box

    __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));

    // Load the oop from the handle
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

    if (UseBiasedLocking) {
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
    }

    // Load (object->mark() | 1) into swap_reg %r0
    __ ldr(rscratch1, Address(obj_reg, 0));
    __ orr(swap_reg, rscratch1, 1);

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ str(swap_reg, Address(lock_reg, mark_word_offset));

    // src -> dest iff dest == r0 else r0 <- dest
    { Label here;
      __ cmpxchgptr(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
    }

    // Hmm should this move to the slow path code area???

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) sp <= mark < mark + os::pagesize()
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - sp) & (3 - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant 2 bits clear.
    // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
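    // For example, with a 4 KB page, 3 - os::vm_page_size() is -4093,
    // i.e. 0x...fffff003, so the AND below clears bits 2..11 and the
    // result is zero exactly when mark - sp is 4-byte aligned and less
    // than one page (a negative difference sets the high bits).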
    __ sub(swap_reg, sp, swap_reg);
    __ neg(swap_reg, swap_reg);
    __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());

    // Save the test result, for recursive case, the result is zero
    __ str(swap_reg, Address(lock_reg, mark_word_offset));
    __ br(Assembler::NE, slow_path_lock);

    // Slow path will re-enter here

    __ bind(lock_done);
  }


  // Finally just about ready to make the JNI call

  // get JNIEnv* which is first argument to native
  if (!is_critical_native) {
    __ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
  }

  // Now set thread in native
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  {
    int return_type = 0;
    switch (ret_type) {
    case T_VOID:
      return_type = 0; break;
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
    case T_BOOLEAN:
    case T_LONG:
      return_type = 1; break;
    case T_ARRAY:
    case T_OBJECT:
      return_type = 1; break;
    case T_FLOAT:
      return_type = 2; break;
    case T_DOUBLE:
      return_type = 3; break;
    default:
      ShouldNotReachHere();
    }
    rt_call(masm, native_func,
            int_args + 2, // AArch64 passes up to 8 args in int registers
            float_args,   // and up to 8 float args
            return_type);
  }
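  // The "+ 2" is an upper-bound allowance for the hidden arguments
  // (JNIEnv* in c_rarg0 and, for static methods, the class handle in
  // c_rarg1) that were not counted while shuffling the declared args.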

  // Unpack native results.
  switch (ret_type) {
  case T_BOOLEAN: __ ubfx(r0, r0, 0, 8);  break;
  case T_CHAR   : __ ubfx(r0, r0, 0, 16); break;
  case T_BYTE   : __ sbfx(r0, r0, 0, 8);  break;
  case T_SHORT  : __ sbfx(r0, r0, 0, 16); break;
  case T_INT    : __ sbfx(r0, r0, 0, 32); break;
  case T_DOUBLE :
  case T_FLOAT  :
    // Result is in v0 we'll save as needed
    break;
  case T_ARRAY  :                 // Really a handle
  case T_OBJECT :                 // Really a handle
    break; // can't de-handlize until after safepoint check
  case T_VOID: break;
  case T_LONG: break;
  default       : ShouldNotReachHere();
  }
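  // ubfx zero-extends and sbfx sign-extends the low 8/16/32 bits of r0,
  // normalizing sub-word results whose upper bits the native calling
  // convention leaves unspecified.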

  // Switch thread to "native transition" state before reading the synchronization state.
  // This additional state is necessary because reading and testing the synchronization
  // state is not atomic w.r.t. GC, as this scenario demonstrates:
  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
  //     VM thread changes sync state to synchronizing and suspends threads for GC.
  //     Thread A is resumed to finish this native method, but doesn't block here since it
  //     didn't see any synchronization in progress, and escapes.
  __ mov(rscratch1, _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));

      // Force this write out before the read below
      __ dmb(Assembler::SY);
    } else {
      __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
      __ stlrw(rscratch1, rscratch2);

      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page.  This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(rthread, r2);
    }
  }

  // check for safepoint operation in progress and/or pending suspend requests
  Label safepoint_in_progress, safepoint_in_progress_done;
  {
    assert(SafepointSynchronize::_not_synchronized == 0, "fix this code");
    unsigned long offset;
    __ adrp(rscratch1,
            ExternalAddress((address)SafepointSynchronize::address_of_state()),
            offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    __ cbnzw(rscratch1, safepoint_in_progress);
    __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbnzw(rscratch1, safepoint_in_progress);
    __ bind(safepoint_in_progress_done);
  }

  // change thread state
  Label after_transition;
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
  __ bind(after_transition);

  Label reguard;
  Label reguard_done;
  __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
  __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
  __ br(Assembler::EQ, reguard);
  __ bind(reguard_done);

  // native result if any is live

  // Unlock
  Label unlock_done;
  Label slow_path_unlock;
  if (method->is_synchronized()) {

    // Get locked oop from the handle we passed to jni
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

    Label done;

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, old_hdr, done);
    }

    // Simple recursive lock?

    __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    __ cbz(rscratch1, done);

    // Must save r0 if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }


    // get address of the stack lock
    __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    //  get old displaced header
    __ ldr(old_hdr, Address(r0, 0));

    // Atomic swap old header if oop still contains the stack lock
    Label succeed;
    __ cmpxchgptr(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
    __ bind(succeed);

    // slow path re-enters here
    __ bind(unlock_done);
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      restore_native_result(masm, ret_type, stack_slots);
    }

    __ bind(done);
  }

  Label dtrace_method_exit, dtrace_method_exit_done;
  {
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
    __ ldrb(rscratch1, Address(rscratch1, offset));
    __ cbnzw(rscratch1, dtrace_method_exit);
    __ bind(dtrace_method_exit_done);
  }

  __ reset_last_Java_frame(false, true);

  // Unpack oop result
  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
    Label L;
    __ cbz(r0, L);
    __ ldr(r0, Address(r0, 0));
    __ bind(L);
    __ verify_oop(r0);
  }

  if (!is_critical_native) {
    // reset handle block
    __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
    __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
  }

  __ leave();

  if (!is_critical_native) {
    // Any exception pending?
    __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    __ cbnz(rscratch1, exception_pending);
  }

  // record exit from native wrapper code
  if (NotifySimulator) {
    __ notify(Assembler::method_reentry);
  }

  // We're done
  __ ret(lr);

  // Unexpected paths are out of line and go here

  if (!is_critical_native) {
    // forward the exception
    __ bind(exception_pending);
    __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
  }

  // Slow path locking & unlocking
  if (method->is_synchronized()) {

    __ block_comment("Slow path lock {");
    __ bind(slow_path_lock);

    // has last_Java_frame setup.  No exceptions so do vanilla call not call_VM
    // args are (oop obj, BasicLock* lock, JavaThread* thread)

    // protect the args we've loaded
    save_args(masm, total_c_args, c_arg, out_regs);

    __ mov(c_rarg0, obj_reg);
    __ mov(c_rarg1, lock_reg);
    __ mov(c_rarg2, rthread);

    // Not a leaf but we have last_Java_frame setup as we want
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
    restore_args(masm, total_c_args, c_arg, out_regs);

#ifdef ASSERT
    { Label L;
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
      __ cbz(rscratch1, L);
      __ stop("no pending exception allowed on exit from monitorenter");
      __ bind(L);
    }
#endif
    __ b(lock_done);

    __ block_comment("} Slow path lock");

    __ block_comment("Slow path unlock {");
    __ bind(slow_path_unlock);

    // If we haven't already saved the native result we must save it now as
    // the floating-point registers are still exposed.

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
      save_native_result(masm, ret_type, stack_slots);
    }

    __ mov(c_rarg2, rthread);
    __ lea(c_rarg1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    __ mov(c_rarg0, obj_reg);

    // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
    // NOTE that obj_reg == r19 currently
    __ ldr(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    __ str(zr, Address(rthread, in_bytes(Thread::pending_exception_offset())));

    rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), 3, 0, 1);

#ifdef ASSERT
    {
      Label L;
      __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
      __ cbz(rscratch1, L);
      __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
      __ bind(L);
    }
#endif /* ASSERT */

    __ str(r19, Address(rthread, in_bytes(Thread::pending_exception_offset())));

    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
      restore_native_result(masm, ret_type, stack_slots);
    }
    __ b(unlock_done);

    __ block_comment("} Slow path unlock");

  } // synchronized

  // SLOW PATH Reguard the stack if needed

  __ bind(reguard);
  save_native_result(masm, ret_type, stack_slots);
  rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), 0, 0, 0);
  restore_native_result(masm, ret_type, stack_slots);
  // and continue
  __ b(reguard_done);

  // SLOW PATH safepoint
  {
    __ block_comment("safepoint {");
    __ bind(safepoint_in_progress);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here, preventing us from clearing _last_native_pc down below.
    //
    save_native_result(masm, ret_type, stack_slots);
    __ mov(c_rarg0, rthread);
#ifndef PRODUCT
    assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
    if (!is_critical_native) {
      __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    } else {
      __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
    }
    __ blrt(rscratch1, 1, 0, 1);
    __ maybe_isb();
    // Restore any method result value
    restore_native_result(masm, ret_type, stack_slots);

    if (is_critical_native) {
      // The call above performed the transition to thread_in_Java so
      // skip the transition logic above.
      __ b(after_transition);
    }

    __ b(safepoint_in_progress_done);
    __ block_comment("} safepoint");
  }

  // SLOW PATH dtrace support
  {
    __ block_comment("dtrace entry {");
    __ bind(dtrace_method_entry);

    // We have all of the arguments set up at this point.  We must not
    // touch any of the argument registers here (what if we save/restore
    // them and there are no oops?).

    save_args(masm, total_c_args, c_arg, out_regs);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
      rthread, c_rarg1);
    restore_args(masm, total_c_args, c_arg, out_regs);
    __ b(dtrace_method_entry_done);
    __ block_comment("} dtrace entry");
  }

  {
    __ block_comment("dtrace exit {");
    __ bind(dtrace_method_exit);
    save_native_result(masm, ret_type, stack_slots);
    __ mov_metadata(c_rarg1, method());
    __ call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
      rthread, c_rarg1);
    restore_native_result(masm, ret_type, stack_slots);
    __ b(dtrace_method_exit_done);
    __ block_comment("} dtrace exit");
  }

  __ flush();

  nmethod *nm = nmethod::new_native_nmethod(method,
                                            compile_id,
                                            masm->code(),
                                            vep_offset,
                                            frame_complete,
                                            stack_slots / VMRegImpl::slots_per_word,
                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
                                            oop_maps);

  if (is_critical_native) {
    nm->set_lazy_critical_native(true);
  }

  return nm;

}

// this function returns the adjustment size (in number of words) to a c2i adapter
// activation for use during deoptimization
int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
  assert(callee_locals >= callee_parameters,
         "test and remove; got more parms than locals");
  if (callee_locals < callee_parameters)
    return 0;                     // No adjustment for negative locals
  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
  // diff is counted in stack words
  return round_to(diff, 2);
}
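
// Example: 2 parameters and 5 locals give diff == 3 stack words (one
// word per stack element on a 64-bit VM); round_to(diff, 2) returns 4,
// keeping the adjustment a multiple of 16 bytes.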

//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  int pad = 0;
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    pad += 512; // Increase the buffer size when compiling for JVMCI
  }
#endif
  CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);
  int frame_size_in_words;
  OopMap* map = NULL;
  OopMapSet *oop_maps = new OopMapSet();

#ifdef BUILTIN_SIM
  AArch64Simulator *simulator;
  if (NotifySimulator) {
    simulator = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
    simulator->notifyCompile(const_cast<char*>("SharedRuntime::deopt_blob"), __ pc());
  }
#endif

  // -------------
  // This code enters when returning to a de-optimized nmethod.  A return
  // address has been pushed on the stack, and return values are in
  // registers.
  // If we are doing a normal deopt then we were called from the patched
  // nmethod from the point we returned to the nmethod.  So the return
  // address on the stack is wrong by NativeCall::instruction_size.
  // We will adjust the value so it looks like we have the original return
  // address on the stack (like when we eagerly deoptimized).
  // In the case of an exception pending when deoptimizing, we enter
  // with a return address on the stack that points after the call we patched
  // into the exception handler.  We have the following register state from,
  // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
  //    r0: exception oop
  //    r19: exception handler
  //    r3: throwing pc
  // So in this case we simply jam r3 into the useless return address and
  // the stack looks just like we want.
  //
  // At this point we need to de-opt.  We save the argument return
  // registers.  We call the first C routine, fetch_unroll_info().  This
  // routine captures the return values and returns a structure which
  // describes the current frame size and the sizes of all replacement frames.
  // The current frame is compiled code and may contain many inlined
  // functions, each with their own JVM state.  We pop the current frame, then
  // push all the new frames.  Then we call the C routine unpack_frames() to
  // populate these frames.  Finally unpack_frames() returns us the new target
  // address.  Notice that callee-save registers are BLOWN here; they have
  // already been captured in the vframeArray at the time the return PC was
  // patched.
  address start = __ pc();
  Label cont;

  // Prolog for non-exception case!

  // Save everything in sight.
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  // Normal deoptimization.  Save exec mode for unpack_frames.
  __ movw(rcpool, Deoptimization::Unpack_deopt); // callee-saved
  __ b(cont);

  int reexecute_offset = __ pc() - start;
#if INCLUDE_JVMCI && !defined(COMPILER1)
  if (EnableJVMCI && UseJVMCICompiler) {
    // JVMCI does not use this kind of deoptimization
    __ should_not_reach_here();
  }
#endif

  // Reexecute case
  // return address is the pc that describes what bci to re-execute at

  // No need to update map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  __ movw(rcpool, Deoptimization::Unpack_reexecute); // callee-saved
  __ b(cont);

#if INCLUDE_JVMCI
  Label after_fetch_unroll_info_call;
  int implicit_exception_uncommon_trap_offset = 0;
  int uncommon_trap_offset = 0;

  if (EnableJVMCI) {
    implicit_exception_uncommon_trap_offset = __ pc() - start;

    __ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
    __ str(zr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));

    uncommon_trap_offset = __ pc() - start;

    // Save everything in sight.
    RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
    // fetch_unroll_info needs to call last_java_frame()
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ ldrw(c_rarg1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));
    __ movw(rscratch1, -1);
    __ strw(rscratch1, Address(rthread, in_bytes(JavaThread::pending_deoptimization_offset())));

    __ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
    __ mov(c_rarg0, rthread);
    __ lea(rscratch1,
           RuntimeAddress(CAST_FROM_FN_PTR(address,
                                           Deoptimization::uncommon_trap)));
    __ blrt(rscratch1, 2, 0, MacroAssembler::ret_type_integral);
    __ bind(retaddr);
    oop_maps->add_gc_map( __ pc()-start, map->deep_copy());

    __ reset_last_Java_frame(false, false);

    __ b(after_fetch_unroll_info_call);
  } // EnableJVMCI
#endif // INCLUDE_JVMCI

  int exception_offset = __ pc() - start;

  // Prolog for exception case

  // all registers are dead at this entry point, except for r0, and
  // r3 which contain the exception oop and exception pc
  // respectively.  Set them in TLS and fall thru to the
  // unpack_with_exception_in_tls entry point.

  __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));

  int exception_in_tls_offset = __ pc() - start;

  // new implementation because exception oop is now passed in JavaThread

  // Prolog for exception case
  // All registers must be preserved because they might be used by LinearScan
  // Exception oop and throwing PC are passed in JavaThread
  // tos: stack at point of call to method that threw the exception (i.e. only
  // args are on the stack, no return address)

  // The return address pushed by save_live_registers will be patched
  // later with the throwing pc.  The correct value is not available
  // now because loading it from memory would destroy registers.

  // NB: The SP at this point must be the SP of the method that is
  // being deoptimized.  Deoptimization assumes that the frame created
  // here by save_live_registers is immediately below the method's SP.
  // This is a somewhat fragile mechanism.

  // Save everything in sight.
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  // Now it is safe to overwrite any register

  // Deopt during an exception.  Save exec mode for unpack_frames.
  __ mov(rcpool, Deoptimization::Unpack_exception); // callee-saved

  // load throwing pc from JavaThread and patch it as the return address
  // of the current frame.  Then clear the field in JavaThread

  __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ str(r3, Address(rfp, wordSize));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  // verify that there is really an exception oop in JavaThread
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ verify_oop(r0);

  // verify that there is no pending exception
  Label no_pending_exception;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbz(rscratch1, no_pending_exception);
  __ stop("must not have pending exception here");
  __ bind(no_pending_exception);
#endif

  __ bind(cont);

  // Call C code.  Need thread and this frame, but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.
  //
  // UnrollBlock* fetch_unroll_info(JavaThread* thread)

  // fetch_unroll_info needs to call last_java_frame().

  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);
#ifdef ASSERT0
  { Label L;
    __ ldr(rscratch1, Address(rthread,
                              JavaThread::last_Java_fp_offset()));
    __ cbz(rscratch1, L);
    __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
    __ bind(L);
  }
#endif // ASSERT0
  __ mov(c_rarg0, rthread);
  __ mov(c_rarg1, rcpool);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
  __ blrt(rscratch1, 1, 0, 1);
  __ bind(retaddr);

  // Need to have an oopmap that tells fetch_unroll_info where to
  // find any register it might need.
  oop_maps->add_gc_map(__ pc() - start, map);

  __ reset_last_Java_frame(false, true);

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    __ bind(after_fetch_unroll_info_call);
  }
#endif

  // Load UnrollBlock* into r5
  __ mov(r5, r0);

  __ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
  Label noException;
  __ cmpw(rcpool, Deoptimization::Unpack_exception);   // Was exception pending?
  __ br(Assembler::NE, noException);
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // QQQ this is useless, it was NULL above
  __ ldr(r3, Address(rthread, JavaThread::exception_pc_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ verify_oop(r0);

  // Overwrite the result registers with the exception results.
  __ str(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
  // I think this is useless
  // __ str(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));

  __ bind(noException);

  // Only register save data is on the stack.
  // Now restore the result registers.  Everything else is either dead
  // or captured in the vframeArray.
  RegisterSaver::restore_result_registers(masm);

  // All of the register save area has been popped off the stack.  Only the
  // return address remains.

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).
  //
  // Note: by leaving the return address of self-frame on the stack
  // and using the size of frame 2 to adjust the stack
  // when we are done the return to frame 3 will still be on the stack.

  // Pop deoptimized frame
  __ ldrw(r2, Address(r5, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
  __ sub(r2, r2, 2 * wordSize);
  __ add(sp, sp, r2);
  __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
  // LR should now be the return address to the caller (3)
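  // The 2 * wordSize subtracted from the frame size above is the saved
  // rfp/lr pair; it is popped separately by the post-indexed ldp so that
  // the caller's fp and the return address are reloaded as well.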
|
2516 |
||
2517 |
#ifdef ASSERT |
|
2518 |
// Compilers generate code that bang the stack by as much as the |
|
2519 |
// interpreter would need. So this stack banging should never |
|
2520 |
// trigger a fault. Verify that it does not on non product builds. |
|
2521 |
if (UseStackBanging) { |
|
2522 |
__ ldrw(r19, Address(r5, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); |
|
2523 |
__ bang_stack_size(r19, r2); |
|
2524 |
} |
|
2525 |
#endif |
|
2526 |
// Load address of array of frame pcs into r2 |
|
2527 |
__ ldr(r2, Address(r5, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); |
|
2528 |
||
2529 |
// Trash the old pc |
|
2530 |
// __ addptr(sp, wordSize); FIXME ???? |
|
2531 |
||
2532 |
// Load address of array of frame sizes into r4 |
|
2533 |
__ ldr(r4, Address(r5, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes())); |
|
2534 |
||
2535 |
// Load counter into r3 |
|
2536 |
__ ldrw(r3, Address(r5, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); |
|
2537 |
||
2538 |
// Now adjust the caller's stack to make up for the extra locals |
|
2539 |
// but record the original sp so that we can save it in the skeletal interpreter |
|
2540 |
// frame and the stack walking of interpreter_sender will get the unextended sp |
|
2541 |
// value and not the "real" sp value. |
|
2542 |
||
2543 |
const Register sender_sp = r6; |
|
2544 |
||
2545 |
__ mov(sender_sp, sp); |
|
2546 |
__ ldrw(r19, Address(r5, |
|
2547 |
Deoptimization::UnrollBlock:: |
|
2548 |
caller_adjustment_offset_in_bytes())); |
|
2549 |
__ sub(sp, sp, r19); |
|
2550 |
||
2551 |
// Push interpreter frames in a loop |
|
2552 |
__ mov(rscratch1, (address)0xDEADDEAD); // Make a recognizable pattern |
|
2553 |
__ mov(rscratch2, rscratch1); |
|
2554 |
Label loop; |
|
2555 |
__ bind(loop); |
|
2556 |
__ ldr(r19, Address(__ post(r4, wordSize))); // Load frame size |
|
2557 |
__ sub(r19, r19, 2*wordSize); // We'll push pc and fp by hand |
|
2558 |
__ ldr(lr, Address(__ post(r2, wordSize))); // Load pc |
|
2559 |
__ enter(); // Save old & set new fp |
|
2560 |
__ sub(sp, sp, r19); // Prolog |
|
2561 |
// This value is corrected by layout_activation_impl |
|
2562 |
__ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize)); |
|
2563 |
__ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable |
|
2564 |
__ mov(sender_sp, sp); // Pass sender_sp to next frame |
|
2565 |
__ sub(r3, r3, 1); // Decrement counter |
|
2566 |
__ cbnz(r3, loop); |

  // Re-push self-frame
  __ ldr(lr, Address(r2));
  __ enter();

  // Allocate a full sized register save area.  We subtract 2 because
  // enter() just pushed 2 words
  __ sub(sp, sp, (frame_size_in_words - 2) * wordSize);

  // Restore frame locals after moving the frame
  __ strd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
  __ str(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  //
  // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)

  // Use rfp because the frames look interpreted now
  // Don't need the precise return PC here, just precise enough to point into this code blob.
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  __ mov(c_rarg0, rthread);
  __ movw(c_rarg1, rcpool); // second arg: exec_mode
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  __ blrt(rscratch1, 2, 0, 0);
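  // n.b. 2 gp args, 0 fp args; unpack_frames does return a BasicType but
  // the value is not consumed here (the return values are reloaded from
  // the register save area below).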

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start,
                       new OopMap(frame_size_in_words, 0));

  // Clear fp AND pc
  __ reset_last_Java_frame(true, true);

  // Collect return values
  __ ldrd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
  __ ldr(r0, Address(sp, RegisterSaver::r0_offset_in_bytes()));
  // I think this is useless (throwing pc?)
  // __ ldr(r3, Address(sp, RegisterSaver::r3_offset_in_bytes()));

  // Pop self-frame.
  __ leave();                           // Epilog

  // Jump to interpreter
  __ ret(lr);

  // Make sure all code is generated
  masm->flush();

  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
#ifdef BUILTIN_SIM
  if (NotifySimulator) {
    unsigned char *base = _deopt_blob->code_begin();
    simulator->notifyRelocate(start, base - start);
  }
#endif
}

uint SharedRuntime::out_preserve_stack_slots() {
  return 0;
}

#if defined(COMPILER2) || INCLUDE_JVMCI
//------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

#ifdef BUILTIN_SIM
  AArch64Simulator *simulator;
  if (NotifySimulator) {
    simulator = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
    simulator->notifyCompile(const_cast<char*>("SharedRuntime:uncommon_trap_blob"), __ pc());
  }
#endif

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  address start = __ pc();

  // Push self-frame.  We get here with a return address in LR
  // and sp should be 16 byte aligned
  // push rfp and retaddr by hand
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // we don't expect an arg reg save area
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  // compiler left unloaded_class_index in j_rarg0; move to where the
  // runtime expects it.
  if (c_rarg1 != j_rarg0) {
    __ movw(c_rarg1, j_rarg0);
  }

  // we need to set the last SP to the stack pointer of the stub frame
  // and the pc to the address where this runtime call will return
  // (although actually any pc in this code blob will do).
  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // capture callee-saved registers as well as return values.
  // The thread is passed in c_rarg0 below.
  //
  // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index, jint exec_mode);
  //
  // n.b. 2 gp args, 0 fp args, integral return type

  __ mov(c_rarg0, rthread);
  __ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ lea(rscratch1,
         RuntimeAddress(CAST_FROM_FN_PTR(address,
                                         Deoptimization::uncommon_trap)));
  __ blrt(rscratch1, 2, 0, MacroAssembler::ret_type_integral);
  __ bind(retaddr);

  // Set an oopmap for the call site
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);

  // location of rfp is known implicitly by the frame sender code

  oop_maps->add_gc_map(__ pc() - start, map);

  __ reset_last_Java_frame(false, true);

  // move UnrollBlock* into r4
  __ mov(r4, r0);

#ifdef ASSERT
  { Label L;
    __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
    __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
    __ br(Assembler::EQ, L);
    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
    __ bind(L);
  }
#endif
  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).

  // Pop self-frame.  We have no frame, and must rely only on r0 and sp.
  __ add(sp, sp, (SimpleRuntimeFrame::framesize) << LogBytesPerInt); // Epilog!
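  // (SimpleRuntimeFrame::framesize counts 32-bit stack slots, so the
  // shift by LogBytesPerInt above converts it to a byte count.)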

  // Pop deoptimized frame (int)
  __ ldrw(r2, Address(r4,
                      Deoptimization::UnrollBlock::
                      size_of_deoptimized_frame_offset_in_bytes()));
  __ sub(r2, r2, 2 * wordSize);
  __ add(sp, sp, r2);
  __ ldp(rfp, lr, __ post(sp, 2 * wordSize));
  // LR should now be the return address to the caller (3) frame

#ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non-product builds.
  if (UseStackBanging) {
    __ ldrw(r1, Address(r4,
                        Deoptimization::UnrollBlock::
                        total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(r1, r2);
  }
#endif

  // Load address of array of frame pcs into r2 (address*)
  __ ldr(r2, Address(r4,
                     Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  // Load address of array of frame sizes into r5 (intptr_t*)
  __ ldr(r5, Address(r4,
                     Deoptimization::UnrollBlock::
                     frame_sizes_offset_in_bytes()));

  // Counter
  __ ldrw(r3, Address(r4,
                      Deoptimization::UnrollBlock::
                      number_of_frames_offset_in_bytes())); // (int)

  // Now adjust the caller's stack to make up for the extra locals but
  // record the original sp so that we can save it in the skeletal
  // interpreter frame and the stack walking of interpreter_sender
  // will get the unextended sp value and not the "real" sp value.

  const Register sender_sp = r8;

  __ mov(sender_sp, sp);
  __ ldrw(r1, Address(r4,
                      Deoptimization::UnrollBlock::
                      caller_adjustment_offset_in_bytes())); // (int)
  __ sub(sp, sp, r1);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ ldr(r1, Address(r5, 0));       // Load frame size
  __ sub(r1, r1, 2 * wordSize);     // We'll push pc and rfp by hand
  __ ldr(lr, Address(r2, 0));       // Save return address
  __ enter();                       // and old rfp & set new rfp
  __ sub(sp, sp, r1);               // Prolog
  __ str(sender_sp, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize)); // Make it walkable
  // This value is corrected by layout_activation_impl
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ mov(sender_sp, sp);            // Pass sender_sp to next frame
  __ add(r5, r5, wordSize);         // Bump array pointer (sizes)
  __ add(r2, r2, wordSize);         // Bump array pointer (pcs)
  __ subsw(r3, r3, 1);              // Decrement counter
  __ br(Assembler::GT, loop);
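  // subsw sets the condition flags, so the GT branch above keeps looping
  // until the frame counter in r3 reaches zero.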
  __ ldr(lr, Address(r2, 0));       // save final return address
  // Re-push self-frame
  __ enter();                       // & old rfp & set new rfp

  // Use rfp because the frames look interpreted now
  // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
  // Don't need the precise return PC here, just precise enough to point into this code blob.
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  // The thread is passed in c_rarg0 below.
  //
  // BasicType unpack_frames(JavaThread* thread, int exec_mode);
  //
  // n.b. 2 gp args, 0 fp args, integral return type

  // sp should already be aligned
  __ mov(c_rarg0, rthread);
  __ movw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  __ blrt(rscratch1, 2, 0, MacroAssembler::ret_type_integral);

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  // Clear fp AND pc
  __ reset_last_Java_frame(true, true);

  // Pop self-frame.
  __ leave();                       // Epilog

  // Jump to interpreter
  __ ret(lr);

  // Make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
                                                 SimpleRuntimeFrame::framesize >> 1);

#ifdef BUILTIN_SIM
  if (NotifySimulator) {
    unsigned char *base = _uncommon_trap_blob->code_begin();
    simulator->notifyRelocate(start, base - start);
  }
#endif
}
#endif // COMPILER2 || INCLUDE_JVMCI


//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// and sets up an oopmap.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code.  Setup code generation tools.
  CodeBuffer buffer("handler_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start   = __ pc();
  address call_pc = NULL;
  int frame_size_in_words;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);

  // Save registers, fpu state, and flags
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);

  // The following is basically a call_VM.  However, we need the precise
  // address of the call in order to generate an oopmap.  Hence, we do all the
  // work ourselves.

  Label retaddr;
  __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

  // The return address must always be correct so that frame constructor never
  // sees an invalid pc.

  if (!cause_return) {
    // overwrite the return address pushed by save_live_registers
    __ ldr(c_rarg0, Address(rthread, JavaThread::saved_exception_pc_offset()));
    __ str(c_rarg0, Address(rfp, wordSize));
  }
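  // For a non-return poll the thread stopped at a faulting instruction
  // rather than at a call, so the pc recorded by the signal handler
  // replaces the return address here and the frame appears to "return"
  // to the faulting instruction.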

  // Do the call
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(call_ptr));
  __ blrt(rscratch1, 1, 0, 1);
  __ bind(retaddr);

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  oop_maps->add_gc_map( __ pc() - start, map);

  Label noException;

  __ reset_last_Java_frame(false, true);

  __ maybe_isb();
  __ membar(Assembler::LoadLoad | Assembler::LoadStore);

  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbz(rscratch1, noException);

  // Exception pending

  RegisterSaver::restore_live_registers(masm);

  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // No exception case
  __ bind(noException);

  // Normal exit, restore registers and exit.
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ ret(lr);

  // Make sure all code is generated
  masm->flush();

  // Fill-out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_words;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  int frame_complete = __ offset();

  {
    Label retaddr;
    __ set_last_Java_frame(sp, noreg, retaddr, rscratch1);

    __ mov(c_rarg0, rthread);
    __ lea(rscratch1, RuntimeAddress(destination));

    __ blrt(rscratch1, 1, 0, 1);
    __ bind(retaddr);
  }

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ maybe_isb();

  // r0 contains the address we are going to jump to assuming no exception got installed

  // clear last_Java_sp
  __ reset_last_Java_frame(false, true);
  // check for pending exceptions
  Label pending;
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, pending);

  // get the returned Method*
  __ get_vm_result_2(rmethod, rthread);
  __ str(rmethod, Address(sp, RegisterSaver::reg_offset_in_bytes(rmethod)));

  // r0 is where we want to jump; overwrite rscratch1's slot, which is saved and scratch
  __ str(r0, Address(sp, RegisterSaver::rscratch1_offset_in_bytes()));
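  // restore_live_registers below reloads rscratch1 from the save slot we
  // just stored r0 into, so the resolved entry point survives the restore.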
  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ br(rscratch1);

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ str(zr, Address(rthread, JavaThread::vm_result_offset()));

  __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}


#if defined(COMPILER2) || INCLUDE_JVMCI
// This is here instead of runtime_aarch64.cpp because it uses SimpleRuntimeFrame
//
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in aarch64.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee save registers), unwind the frame, and jump to the
// exception handler for the nmethod if there is no Java level handler
// for the nmethod.
//
// This code is entered with a branch.
//
// Arguments:
//   r0: exception oop
//   r3: exception pc
//
// Results:
//   r0: exception oop
//   r3: exception pc in caller or ???
//   destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//       Registers r0, r3, r2, r4, r5, r8-r11 are not callee saved.
//
void OptoRuntime::generate_exception_blob() {
  assert(!OptoRuntime::is_callee_saved_register(R3_num), "");
  assert(!OptoRuntime::is_callee_saved_register(R0_num), "");
  assert(!OptoRuntime::is_callee_saved_register(R2_num), "");

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("exception_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  // TODO check various assumptions made here
  //
  // make sure we do so before running this

  address start = __ pc();

  // push rfp and retaddr by hand
  // Exception pc is 'return address' for stack walker
  __ stp(rfp, lr, Address(__ pre(sp, -2 * wordSize)));
  // there are no callee save registers and we don't expect an
  // arg reg save area
#ifndef PRODUCT
  assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
#endif
  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumption
  // about the size of the frame where the exception happened.
  __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

  // This call does all the hard work.  It checks if an exception handler
  // exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  // address OptoRuntime::handle_exception_C(JavaThread* thread)
  //
  // n.b. 1 gp arg, 0 fp args, integral return type

  // the stack should always be aligned
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, noreg, the_pc, rscratch1);
  __ mov(c_rarg0, rthread);
  __ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
  __ blrt(rscratch1, 1, 0, MacroAssembler::ret_type_integral);
  __ maybe_isb();

  // Set an oopmap for the call site.  This oopmap will only be used if we
  // are unwinding the stack.  Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.

  OopMapSet* oop_maps = new OopMapSet();

  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  __ reset_last_Java_frame(false, true);

  // Restore callee-saved registers

  // rfp is an implicitly saved callee saved register (i.e. the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee save registers now that adapter frames are gone,
  // and we don't expect an arg reg save area.
  __ ldp(rfp, r3, Address(__ post(sp, 2 * wordSize)));
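  // This pops the fp/'return address' pair pushed on entry; per the
  // comment at the top of this blob, that return-address slot holds the
  // exception pc, which thus lands in r3.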

  // r0: exception handler

  // We have a handler in r0 (could be deopt blob).
  __ mov(r8, r0);

  // Get the exception oop
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ ldr(r4, Address(rthread, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ str(zr, Address(rthread, JavaThread::exception_handler_pc_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));

  // r0: exception oop
  // r8: exception handler
  // r4: exception pc
  // Jump to handler

  __ br(r8);

  // Make sure all code is generated
  masm->flush();

  // Set exception blob
  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2 || INCLUDE_JVMCI