/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
                           address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) ||
         oop_result1 != metadata_result, "registers must be different");

  // Currently no stack banging. We assume that there are enough
  // StackShadowPages (which have been banged in generate_stack_overflow_check)
  // for the stub frame and the runtime frames.

  set_last_Java_frame(R1_SP, noreg);

  // ARG1 must hold thread address.
  mr(R3_ARG1, R16_thread);

  address return_pc = call_c_with_frame_resize(entry_point, /*No resize, we have a C compatible frame.*/0);

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    ld(R0, in_bytes(Thread::pending_exception_offset()), R16_thread);
    cmpdi(CCR0, R0, 0);

    // This used to conditionally jump to forward_exception, but after
    // relocation such a branch might not reach its target. So we branch
    // around a jump that can always reach.
    Label ok;
    beq(CCR0, ok);

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid() || metadata_result->is_valid()) {
      li(R0, 0);
      if (oop_result1->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
      }
      if (metadata_result->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
      }
    }

    if (frame_size() == no_frame_size) {
      ShouldNotReachHere(); // We always have a frame size.
      //pop_frame(); // pop the stub frame
      //ld(R0, _abi(lr), R1_SP);
      //mtlr(R0);
      //load_const_optimized(R0, StubRoutines::forward_exception_entry());
      //mtctr(R0);
      //bctr();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      // keep stub frame for next call_RT
      //load_const_optimized(R0, Runtime1::entry_for(Runtime1::forward_exception_id));
      add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(Runtime1::forward_exception_id)));
      mtctr(R0);
      bctr();
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return (int)(return_pc - code_section()->start());
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mr_if_needed(R4_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  mr_if_needed(R6_ARG4, arg3); assert(arg3 != R4_ARG2 && arg3 != R5_ARG3, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int frame_size_in_bytes = -1;

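// Build an OopMap recording where save_live_registers() stores each register.
// Every saved 64-bit register covers two adjacent 32-bit OopMap slots.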
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes > frame::abi_reg_args_size, "init");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset>>2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset>>2) + 1), r->as_VMReg()->next());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset>>2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset>>2) + 1), r->as_VMReg()->next());
    }
  }

  return oop_map;
}

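// Make a stub frame and spill all registers that C1 treats as live across a
// runtime call (GPRs that need saving, and optionally all FPRs). The frame can
// be extended by stack_preserve bytes for stack-passed parameters.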
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true,
                                   Register ret_pc = noreg, int stack_preserve = 0) {
  if (ret_pc == noreg) {
    ret_pc = R0;
    __ mflr(ret_pc);
  }
  __ std(ret_pc, _abi(lr), R1_SP); // C code needs pc in C1 method.
  __ push_frame(frame_size_in_bytes + stack_preserve, R0);

  // Record volatile registers as callee-save values in an OopMap so
  // their save locations will be propagated to the caller frame's
  // RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint)).

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ std(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

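// Reload the registers stored by save_live_registers(), skipping the result
// registers (which carry the call's return values), and pop the stub frame.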
static void restore_live_registers(StubAssembler* sasm, Register result1, Register result2,
                                   bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r) && r != result1 && r != result2) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ ld(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ lfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  __ pop_frame();
  __ ld(R0, _abi(lr), R1_SP);
  __ mtlr(R0);
}


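// Compute the save slot offsets used by save/restore_live_registers() and the
// total (alignment-padded) stub frame size.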
void Runtime1::initialize_pd() {
  int i;
  int sp_offset = frame::abi_reg_args_size;

  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset += BytesPerWord;
    }
  }

  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += BytesPerWord;
  }
  frame_size_in_bytes = align_size_up(sp_offset, frame::alignment_in_bytes);
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, R4_ARG2);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

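// Like Runtime1::generate_exception_throw(), but up to three arguments are
// passed on the stack: the caller stores them just below its SP and they are
// reloaded here from the extended stub frame (the switch cases fall through).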
static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm, address target,
                                                            int stack_parms) {
  // Make a frame and preserve the caller's caller-save registers.
  const int parm_size_in_bytes = align_size_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
  switch (stack_parms) {
    case 3:
      __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
    case 2:
      __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
    case 1:
      __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
      call_offset = __ call_RT(noreg, noreg, target);
      break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  __ blr();
  return oop_maps;
}

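// Like Runtime1::generate_stub_call(), but up to three arguments are passed on
// the stack; with do_return == false the caller emits its own continuation.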
static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register result, address target,
                                             int stack_parms, bool do_return = true) {
  // Make a frame and preserve the caller's caller-save registers.
  const int parm_size_in_bytes = align_size_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
  switch (stack_parms) {
    case 3:
      __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
    case 2:
      __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
    case 1:
      __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
      call_offset = __ call_RT(result, noreg, target);
      break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  if (do_return) __ blr();
  return oop_maps;
}


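// Call a runtime patching routine with all registers saved. Afterwards either
// re-execute the patched site or, if the nmethod was deoptimized, enter the
// deoptimization blob so the current bytecode is re-executed.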
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; it returns non-zero if the nmethod got deoptimized.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  __ cmpdi(CCR0, R3_RET, 0);

  // Re-execute the patched instruction or, if the nmethod was deoptimized,
  // return to the deoptimization handler entry that will cause re-execution
  // of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned then we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm, noreg, noreg);
  // Return if patching routine returned 0.
  __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

  address stub = deopt_blob->unpack_with_reexecution();
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctr();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  OopMapSet* oop_maps = NULL;

  // For better readability.
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Stub code & info for the different stubs.
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }
        // We don't support eden allocation.
//        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
//            UseTLAB && FastTLABRefill) {
//          if (id == fast_new_instance_init_check_id) {
//            // make sure the klass is initialized
//            __ lbz(R0, in_bytes(InstanceKlass::init_state_offset()), R3_ARG1);
//            __ cmpwi(CCR0, R0, InstanceKlass::fully_initialized);
//            __ bne(CCR0, slow_path);
//          }
//#ifdef ASSERT
//          // assert object can be fast path allocated
//          {
//            Label ok, not_ok;
//            __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R3_ARG1);
//            // make sure it's an instance (LH > 0)
//            __ cmpwi(CCR0, R0, 0);
//            __ ble(CCR0, not_ok);
//            __ testbitdi(CCR0, R0, R0, Klass::_lh_instance_slow_path_bit);
//            __ beq(CCR0, ok);
//
//            __ bind(not_ok);
//            __ stop("assert(can be fast path allocated)");
//            __ bind(ok);
//          }
//#endif // ASSERT
//          // We don't support eden allocation.
//          __ bind(slow_path);
//        }
        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_instance), R4_ARG2);
      }
      break;

    case counter_overflow_id:
        // Bci and method are on stack.
        oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), 2);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          int tag = (id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value;
          Label ok;
          __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2);
          __ srawi(R0, R0, Klass::_lh_array_tag_shift);
          __ cmpwi(CCR0, R0, tag);
          __ beq(CCR0, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // We don't support eden allocation.

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_type_array), R4_ARG2, R5_ARG3);
        } else {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_object_array), R4_ARG2, R5_ARG3);
        }
      }
      break;

    case new_multi_array_id:
      {
        // R4: klass
        // R5: rank
        // R6: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_multi_array), R4_ARG2, R5_ARG3, R6_ARG4);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);
        // This code is called via rt_call. Hence, caller-save registers have been saved.
        Register t = R11_scratch1;

        // Load the klass and check the has finalizer flag.
        __ load_klass(t, R3_ARG1);
        __ lwz(t, in_bytes(Klass::access_flags_offset()), t);
        __ testbitdi(CCR0, R0, t, exact_log2(JVM_ACC_HAS_FINALIZER));
        // Return if has_finalizer bit == 0 (CR0.eq).
        __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // Empty dummy frame (no callee-save regs).
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R3_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ blr();
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded.
        __ std(R0, -8, R1_SP); // Pass index on stack.
        oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 1);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments); // Arguments will be discarded.
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

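    // Remove the activation of the current compiled method and dispatch to the
    // caller's exception handler, with the exception oop in R3 and the issuing
    // pc in R4.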
    case unwind_exception_id:
      {
        const Register Rexception    = R3 /*LIRGenerator::exceptionOopOpr()*/,
                       Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/,
                       Rexception_save = R31, Rcaller_sp = R30;
        __ set_info("unwind_exception", dont_gc_arguments);

        __ ld(Rcaller_sp, 0, R1_SP);
        __ push_frame_reg_args(0, R0); // dummy frame for C call
        __ mr(Rexception_save, Rexception); // save over C call
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, Rexception_pc);
        __ verify_not_null_oop(Rexception_save);
        __ mtctr(R3_RET);
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ mr(R1_SP, Rcaller_sp); // Pop both frames at once.
        __ mr(Rexception, Rexception_save); // restore
        __ mtlr(Rexception_pc);
        __ bctr();
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        const Register sub_klass   = R5,
                       super_klass = R4,
                       temp1_reg   = R6,
                       temp2_reg   = R0;
        __ check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg); // returns with CR0.eq if successful
        __ crandc(CCR0, Assembler::equal, CCR0, Assembler::equal); // failed: CR0.ne
        __ blr();
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), R4_ARG2, R5_ARG3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      {
        // note: Really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        __ std(R0, -8, R1_SP); // Pass trap_request on stack.
        oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, deoptimize), 1, /*do_return*/ false);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ unimplemented("stub dtrace_object_alloc_id");
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // // We can't gc here so skip the oopmap but make sure that all
        // // the live registers get saved.
        // save_live_registers(sasm);
        //
        // __ save_thread(L7_thread_cache);
        // __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
        //         relocInfo::runtime_call_type);
        // __ delayed()->mov(I0, O0);
        // __ restore_thread(L7_thread_cache);
        //
        // restore_live_registers(sasm);
        // __ ret();
        // __ delayed()->restore();
      }
      break;

#if INCLUDE_ALL_GCS
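    // Slow path of the G1 SATB pre-barrier: enqueue the previous value (pushed
    // by compiled code at -8(SP)) on the thread's SATB mark queue, calling the
    // runtime to refill the queue when its buffer is full.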
    case g1_pre_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          goto unimplemented_entry;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
        const int stack_slots = 3;
        Register pre_val = R0; // previous value of memory
        Register tmp  = R14;
        Register tmp2 = R15;

        Label refill, restart;
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_buf());

        // Spill
        __ std(tmp, -16, R1_SP);
        __ std(tmp2, -24, R1_SP);

        __ bind(restart);
        // Load the index into the SATB buffer. SATBMarkQueue::_index is a
        // size_t so ld_ptr is appropriate.
        __ ld(tmp, satb_q_index_byte_offset, R16_thread);

        // index == 0?
        __ cmpdi(CCR0, tmp, 0);
        __ beq(CCR0, refill);

        __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
        __ ld(pre_val, -8, R1_SP); // Load from stack.
        __ addi(tmp, tmp, -oopSize);

        __ std(tmp, satb_q_index_byte_offset, R16_thread);
        __ stdx(pre_val, tmp2, tmp); // [_buf + index] := pre_val

        // Restore temp registers and return-from-leaf.
        __ ld(tmp2, -24, R1_SP);
        __ ld(tmp, -16, R1_SP);
        __ blr();

        __ bind(refill);
        const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
        __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ b(restart);
      }
      break;

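    // Slow path of the G1 post-barrier: mark the card for the store address
    // (passed in R0) dirty and enqueue it on the thread's dirty card queue.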
    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          goto unimplemented_entry;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        // Using stack slots: spill addr, spill tmp2
        const int stack_slots = 2;
        Register tmp = R0;
        Register addr = R14;
        Register tmp2 = R15;
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        Label restart, refill, ret;

        // Spill
        __ std(addr, -8, R1_SP);
        __ std(tmp2, -16, R1_SP);

        __ srdi(addr, R0, CardTableModRefBS::card_shift); // Addr is passed in R0.
        __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
        __ add(addr, tmp2, addr);
        __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

        // Return if young card.
        __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::g1_young_card_val());
        __ beq(CCR0, ret);

        // Return if the sequentially consistent value is already dirty.
        __ membar(Assembler::StoreLoad);
        __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

        __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::dirty_card_val());
        __ beq(CCR0, ret);

        // Not dirty.

        // First, dirty it.
        __ li(tmp, G1SATBCardTableModRefBS::dirty_card_val());
        __ stb(tmp, 0, addr);

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   DirtyCardQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   DirtyCardQueue::byte_offset_of_buf());

        __ bind(restart);

        // Get the index into the update buffer. DirtyCardQueue::_index is
        // a size_t so ld_ptr is appropriate here.
        __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);

        // index == 0?
        __ cmpdi(CCR0, tmp2, 0);
        __ beq(CCR0, refill);

        __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
        __ addi(tmp2, tmp2, -oopSize);

        __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
        __ add(tmp2, tmp, tmp2);
        __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>

        // Restore temp registers and return-from-leaf.
        __ bind(ret);
        __ ld(tmp2, -16, R1_SP);
        __ ld(addr, -8, R1_SP);
        __ blr();

        __ bind(refill);
        const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
        __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
        __ b(restart);
      }
      break;
#endif // INCLUDE_ALL_GCS

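    // A speculative predicate inserted by the C1 optimizer (e.g. range check
    // elimination) failed: notify the runtime, then deoptimize via the deopt
    // blob so the affected bytecodes are re-executed.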
    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm, noreg, noreg);

        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    default:
    unimplemented_entry:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // empty dummy frame
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);

        __ load_const_optimized(R4_ARG2, (int)id);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  const Register Rexception    = R3 /*LIRGenerator::exceptionOopOpr()*/,
                 Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/;

  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places. Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found. Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, true);
    // Transfer the pending exception to the exception_oop.
    // Also load the PC which is typically at SP + frame_size_in_bytes + _abi(lr),
    // but we support additional slots in the frame for parameter passing.
    __ ld(Rexception_pc, 0, R1_SP);
    __ ld(Rexception, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
    __ li(R0, 0);
    __ ld(Rexception_pc, _abi(lr), Rexception_pc);
    __ std(R0, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Rexception_pc);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except exception oop and exception pc are dead.
    oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
    sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
    __ std(Rexception_pc, _abi(lr), R1_SP);
    __ push_frame(frame_size_in_bytes, R0);
    break;
  default: ShouldNotReachHere();
  }

  __ verify_not_null_oop(Rexception);

#ifdef ASSERT
  // Check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them.
  __ ld(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception oop already set", 0x963);
  __ ld(R0, in_bytes(JavaThread::exception_pc_offset() ), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception pc already set", 0x962);
#endif

  // Save the exception and issuing pc in the thread.
  __ std(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(Rexception_pc, in_bytes(JavaThread::exception_pc_offset() ), R16_thread);

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  __ mtctr(R3_RET);

  // Note: if nmethod has been deoptimized then regardless of
  // whether it had a handler or not we will deoptimize
  // by entering the deopt blob with a pending exception.

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, noreg, noreg, id != handle_exception_nofpu_id);
    __ bctr();
    break;
  case handle_exception_from_callee_id: {
    __ pop_frame();
    __ ld(Rexception_pc, _abi(lr), R1_SP);
    __ mtlr(Rexception_pc);
    __ bctr();
    break;
  }
  default: ShouldNotReachHere();
  }

  return oop_maps;
}

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}

#undef __