/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS

#define __ ce->masm()->
#undef CHECK_BAILOUT
#define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }

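// The `__` shorthand emits assembly through the stub emitter's
// MacroAssembler. Emitting can set the compilation's bailout flag (e.g. on
// constant section overflow, see CounterOverflowStub below), so every
// runtime call in these stubs is followed by CHECK_BAILOUT() before any
// call info is recorded.
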
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception) :
  _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception),
  _index(index) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
    ce->emit_call_c(a);
    CHECK_BAILOUT();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // Pass the array index in Z_R1_scratch, which is not managed by linear scan.
  if (_index->is_cpu_register()) {
    __ lgr_if_needed(Z_R1_scratch, _index->as_register());
  } else {
    __ load_const_optimized(Z_R1_scratch, _index->as_jint());
  }

  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  ce->emit_call_c(Runtime1::entry_for (stub_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  bool success = __ set_metadata_constant(m, Z_R1_scratch);
  if (!success) {
    ce->compilation()->bailout("const section overflow");
    return;
  }
  ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
  ce->store_parameter(_bci, 0);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

// Note: The exception oop is passed to the stub in Z_R1_scratch.
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_obj->is_valid()) {
    __ z_lgr(Z_R1_scratch, _obj->as_register()); // _obj contains the optional argument to the stub.
  }
  address a = Runtime1::entry_for (_stub);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  address a = Runtime1::entry_for (_stub_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_type_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_object_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
  __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
  ce->emit_call_c(Runtime1::entry_for (enter_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Move address of the BasicObjectLock into Z_R1_scratch.
  if (_compute_lock) {
    // Lock_reg was destroyed by fast unlocking attempt => recompute it.
    ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
  } else {
    __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
  }
  // Note: non-blocking leaf routine => no call info needed.
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  ce->emit_call_c(Runtime1::entry_for (exit_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

// Implementation of patching:
// - Copy the code at the given offset into an inlined buffer (first the bytes, then the number of bytes).
// - Replace the original code with a call to the stub.
// At runtime:
// - Call the stub, which jumps into the runtime.
// - In the runtime: preserve all registers (especially objects, i.e., source and destination object).
// - In the runtime: after initializing the class, restore the original code and reexecute the instruction.

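// Sketch of the stub layout that emit_code() below produces (derived from
// the emission order; offsets not to scale):
//   being_initialized_entry:  copy of the code being patched
//                             being_initialized check (load_mirror_id only)
//                             4-byte patch record
//   entry:                    load_const + BASR into the patching runtime;
//                             a jump to 'entry' is planted at _pc_start.
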
int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);

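// Note: 12 + 2 matches the fixed-size load_const plus BASR pair emitted at
// the patch call site in emit_code(); the assert there verifies that
// patch_info_pc - pc() stays equal to _patch_info_offset.
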
void PatchingStub::align_patch_site(MacroAssembler* masm) {
#ifndef PRODUCT
  const char* bc;
  switch (_id) {
    case access_field_id:  bc = "patch site (access_field)";  break;
    case load_klass_id:    bc = "patch site (load_klass)";    break;
    case load_mirror_id:   bc = "patch site (load_mirror)";   break;
    case load_appendix_id: bc = "patch site (load_appendix)"; break;
    default:               bc = "patch site (unknown patch id)"; break;
  }
  masm->block_comment(bc);
#endif

  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being-initialized case.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being-initialized case.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes, but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: opcode of the add followed by 3 bytes of patch record data.
  __ emit_int8((int8_t)(A_ZOPC>>24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
address patch_info_pc = __ pc(); |
|
382 |
assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info"); |
|
383 |
||
384 |
address entry = __ pc(); |
|
385 |
NativeGeneralJump::insert_unconditional((address)_pc_start, entry); |
|
386 |
address target = NULL; |
|
387 |
relocInfo::relocType reloc_type = relocInfo::none; |
|
388 |
switch (_id) { |
|
389 |
case access_field_id: target = Runtime1::entry_for (Runtime1::access_field_patching_id); break; |
|
390 |
case load_klass_id: target = Runtime1::entry_for (Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; |
|
391 |
case load_mirror_id: target = Runtime1::entry_for (Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; |
|
392 |
case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; |
|
393 |
default: ShouldNotReachHere(); |
|
394 |
} |
|
395 |
__ bind(call_patch); |
|
396 |
||
397 |
if (CommentedAssembly) { |
|
398 |
__ block_comment("patch entry point"); |
|
399 |
} |
|
400 |
// Cannot use call_c_opt() because its size is not constant. |
|
401 |
__ load_const(Z_R1_scratch, target); // Must not optimize in order to keep constant _patch_info_offset constant. |
|
402 |
__ z_basr(Z_R14, Z_R1_scratch); |
|
403 |
assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change"); |
|
404 |
ce->add_call_info_here(_info); |
|
405 |
__ z_brcl(Assembler::bcondAlways, _patch_site_entry); |
|
406 |
if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) { |
|
407 |
CodeSection* cs = __ code_section(); |
|
408 |
address pc = (address)_pc_start; |
|
409 |
RelocIterator iter(cs, pc, pc + 1); |
|
410 |
relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none); |
|
411 |
} |
|
412 |
} |
|
413 |
||
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
  ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // Slow case: call to native.
  __ bind(_entry);
  __ lgr_if_needed(Z_ARG1, src()->as_register());
  __ lgr_if_needed(Z_ARG2, src_pos()->as_register());
  __ lgr_if_needed(Z_ARG3, dst()->as_register());
  __ lgr_if_needed(Z_ARG4, dst_pos()->as_register());
  __ lgr_if_needed(Z_ARG5, length()->as_register());

  // Must align call sites, otherwise they can't be updated atomically on MP hardware.
  ce->align_call(lir_static_call);

  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned");

  ce->emit_static_call_stub();

  // Prepend each BRASL with a nop.
  __ relocate(relocInfo::static_call_type);
  __ z_nop();
  __ z_brasl(Z_R14, SharedRuntime::get_resolve_static_call_stub());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_slowcase_cnt);
  __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
#endif

  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(_entry);
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

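  // A null previous value needs no SATB marking: the load-and-test below
  // sets the condition code, and a zero oop branches straight to
  // _continuation.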
  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
  __ branch_optimized(Assembler::bcondZero, _continuation);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_pre_barrier_slow_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
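  // Storing a null new_val creates no cross-region reference, so no card
  // needs to be marked; the load-and-test below skips the runtime call.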
|
  __ z_ltgr(new_val_reg, new_val_reg);
  __ branch_optimized(Assembler::bcondZero, _continuation);
  __ z_lgr(Z_R1_scratch, addr()->as_pointer_register());
  ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_post_barrier_slow_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

#endif // INCLUDE_ALL_GCS

#undef __