/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS

#define __ ce->masm()->
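// Note: in the stubs below, "__" expands to "ce->masm()->", so each
// "__ foo(...)" line emits instructions through the LIR_Assembler's
// MacroAssembler into the current compilation's code buffer.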

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}
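
// Most of the stubs in this file follow the same shape: bind the slow-path
// entry label, move the arguments either into the fixed argument registers or
// into the outgoing-argument words reserved at SP (which is what
// verify_reserved_argument_area_size appears to check), call the matching
// Runtime1 entry, record debug info via add_call_info_here, and branch back
// to _continuation in the main code stream.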


// TODO: ARM - is it possible to inline these stubs into the main code stream?

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = info == NULL ? NULL : new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
  // Pass the array index on the stack because all registers must be preserved
  ce->verify_reserved_argument_area_size(1);
  if (_index->is_cpu_register()) {
    __ str_32(_index->as_register(), Address(SP));
  } else {
    __ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
    __ str_32(Rtemp, Address(SP));
  }

  if (_throw_index_out_of_bounds_exception) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("RangeCheck");)
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
          relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("DivByZero");)
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  const Register obj_reg = _obj_reg->as_pointer_register();
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(2);
#ifdef AARCH64
  __ stp(obj_reg, lock_reg, Address(SP));
#else
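  // ARM's stmia stores the registers of its set in ascending register-number
  // order, so it can only be used here when obj_reg encodes lower than
  // lock_reg; otherwise fall back to two separate stores.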
  if (obj_reg < lock_reg) {
    __ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
  } else {
    __ str(obj_reg, Address(SP));
    __ str(lock_reg, Address(SP, BytesPerWord));
  }
#endif // AARCH64

  Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
                              Runtime1::monitorenter_id :
                              Runtime1::monitorenter_nofpu_id;
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}


// Call return is directly after patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether this needs to be implemented
  ShouldNotReachHere();
#endif
}

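// PatchingStub, roughly: the patchable instructions from the main code stream
// are re-emitted into this stub (the main-code site at _pc_start is
// overwritten with a jump here), followed by a one-word patch record. The
// stub tail-calls a Runtime1 patching routine that resolves the constant and
// rewrites the original site; the call returns just after the patch word and
// branches back to _patch_site_entry so the patched code is re-executed.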
void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = AARCH64_ONLY(NativeInstruction::instruction_size) NOT_AARCH64(0);

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);

#ifdef AARCH64
  assert(nativeInstruction_at(_pc_start)->is_nop(), "required for MT safe patching");

  // Same alignment of reg2mem code and PatchingStub code. Required to make copied bind_literal() code properly aligned.
  __ align(wordSize);
#endif // AARCH64

  if (is_load NOT_AARCH64(&& !VM_Version::supports_movw())) {
    address start = __ pc();

    // The following sequence duplicates code provided in MacroAssembler::patchable_mov_oop()
    // without creating relocation info entry.
#ifdef AARCH64
    // Extra nop for MT safe patching
    __ nop();
#endif // AARCH64

    assert((__ pc() - start) == patchable_instruction_offset, "should be");
#ifdef AARCH64
    __ ldr(_obj, __ pc());
#else
    __ ldr(_obj, Address(PC));
    // Extra nop to handle case of large offset of oop placeholder (see NativeMovConstReg::set_data).
    __ nop();
#endif // AARCH64

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  }

  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (is_load) {
    address start = __ pc();
    if (_id == load_mirror_id || _id == load_appendix_id) {
      __ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
    } else {
      __ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  } else {
    int* start = (int*)_pc_start;
    int* end = start + (_bytes_to_copy / BytesPerInt);
    while (start < end) {
      __ emit_int32(*start++);
    }
  }
  address end_of_patch = __ pc();

  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    assert(_obj != noreg, "must be a valid register");
    // Rtemp should be OK in C1
    __ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
    __ cmp(Rtemp, Rthread);
    __ b(call_patch, ne);
    __ b(_patch_site_continuation);

    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data - 3 high bytes of the word");
  }
  const int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
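  // Patch record layout (one 32-bit word), low byte to high byte:
  // byte 0: 0xff marker, byte 1: offset back to the being_initialized entry,
  // byte 2: bytes to skip past the patch data, byte 3: bytes to copy back.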
  __ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // runtime call will return here
  Label call_return;
  __ bind(call_return);
  ce->add_call_info_here(_info);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  __ b(_patch_site_entry);

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }

  // arrange for call to return just after patch word
  __ adr(LR, call_return);
  __ jump(target, relocInfo::runtime_call_type, Rtemp);

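  // Retag the main-code site at _pc_start (just overwritten with a jump to
  // this stub) with the matching reloc type, so the embedded oop/metadata is
  // tracked correctly once the site has been patched with the real constant.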
  if (is_load) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov_slow(Rtemp, _trap_request);
  ce->verify_reserved_argument_area_size(1);
  __ str(Rtemp, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize instead of throwing the exception, because throwing it
    // here is probably wrong.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  } else {
    assert(_obj->is_illegal(), "should be");
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("SimpleException");)
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);
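  // args[] now describes where the Java calling convention expects each of
  // the five arraycopy arguments (src, src_pos, dst, dst_pos, length):
  // either a register or an outgoing stack slot.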

  Register r[5];
  r[0] = src()->as_pointer_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_pointer_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  for (int i = 0; i < 5; i++) {
    VMReg arg = args[i].first();
    if (arg->is_stack()) {
      __ str(r[i], Address(SP, arg->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      assert(r[i] == arg->as_Register(), "Calling conventions must match");
    }
  }

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  int ret_addr_offset = __ patchable_call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
  __ b(_continuation);
}

/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  __ cbz(pre_val_reg, _continuation);
  ce->verify_reserved_argument_area_size(1);
  __ str(pre_val_reg, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id), relocInfo::runtime_call_type);

  __ b(_continuation);
}

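// G1 post-barrier slow path: the store may have created a cross-region
// reference, so the card for the stored-into address is passed to the
// runtime to be dirtied and enqueued. A null new value creates no
// reference and is skipped below.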
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cbz(new_val_reg, _continuation);
  ce->verify_reserved_argument_area_size(1);
  __ str(addr()->as_pointer_register(), Address(SP));
  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __