/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
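// Within this file `__` expands to the LIR_Assembler's MacroAssembler, so
// `__ bind(...)` and friends emit directly into the current code buffer.
// far_call()/far_jump() emit a direct branch when the target is in range and
// otherwise go through a scratch register, so the Runtime1 stubs are always
// reachable from anywhere in the code cache.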
#define __ ce->masm()->
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
Metadata *m = _method->as_constant_ptr()->as_metadata();
__ mov_metadata(rscratch1, m);
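  // store_parameter() writes into the outgoing argument area of the current
  // frame; the counter_overflow runtime entry expects the bci at word index 0
  // and the Method* at word index 1.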
ce->store_parameter(rscratch1, 1);
ce->store_parameter(_bci, 0);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ b(_continuation);
}
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
bool throw_index_out_of_bounds_exception)
: _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
, _index(index)
{
assert(info != NULL, "must have info");
_info = new CodeEmitInfo(info);
}
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_info->deoptimize_on_exception()) {
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
return;
}
if (_index->is_cpu_register()) {
__ mov(rscratch1, _index->as_register());
} else {
__ mov(rscratch1, _index->as_jint());
}
Runtime1::StubID stub_id;
if (_throw_index_out_of_bounds_exception) {
stub_id = Runtime1::throw_index_exception_id;
} else {
stub_id = Runtime1::throw_range_check_failed_id;
}
__ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), NULL, rscratch2);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
_info = new CodeEmitInfo(info);
}
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
if (_offset != -1) {
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
}
__ bind(_entry);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}
// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
_result = result;
_klass = klass;
_klass_reg = klass_reg;
_info = new CodeEmitInfo(info);
assert(stub_id == Runtime1::new_instance_id ||
stub_id == Runtime1::fast_new_instance_id ||
stub_id == Runtime1::fast_new_instance_init_check_id,
"need new_instance id");
_stub_id = stub_id;
}
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
__ mov(r3, _klass_reg->as_register());
__ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
__ b(_continuation);
}
// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
_klass_reg = klass_reg;
_length = length;
_result = result;
_info = new CodeEmitInfo(info);
}
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
__ b(_continuation);
}
// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
_klass_reg = klass_reg;
_result = result;
_length = length;
_info = new CodeEmitInfo(info);
}
void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
__ b(_continuation);
}
// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
_info = new CodeEmitInfo(info);
}
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
ce->store_parameter(_obj_reg->as_register(), 1);
ce->store_parameter(_lock_reg->as_register(), 0);
Runtime1::StubID enter_id;
if (ce->compilation()->has_fpu_code()) {
enter_id = Runtime1::monitorenter_id;
} else {
enter_id = Runtime1::monitorenter_nofpu_id;
}
__ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
__ b(_continuation);
}
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_compute_lock) {
// lock_reg was destroyed by fast unlocking attempt => recompute it
ce->monitor_address(_monitor_ix, _lock_reg);
}
ce->store_parameter(_lock_reg->as_register(), 0);
// note: non-blocking leaf routine => no call info needed
Runtime1::StubID exit_id;
if (ce->compilation()->has_fpu_code()) {
exit_id = Runtime1::monitorexit_id;
} else {
exit_id = Runtime1::monitorexit_nofpu_id;
}
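  // Tail call into the runtime: point lr at _continuation so that the stub's
  // final ret resumes there directly, with no return through this code:
  //   adr lr, <continuation>
  //   b   <monitorexit stub>   (or an indirect far jump if out of range)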
__ adr(lr, _continuation);
__ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}
// Implementation of patching:
// - Copy the code at the given offset into an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
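// AArch64 does not use C1 runtime patching (see PatchingStub::emit_code below);
// _patch_info_offset is retained only to satisfy the shared PatchingStub
// interface.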
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
void PatchingStub::align_patch_site(MacroAssembler* masm) {
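  // Nothing to do: patch sites are never rewritten on AArch64.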
}
void PatchingStub::emit_code(LIR_Assembler* ce) {
assert(false, "AArch64 should not use C1 runtime patching");
}
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
ce->store_parameter(_trap_request, 0);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
ce->add_call_info_here(_info);
DEBUG_ONLY(__ should_not_reach_here());
}
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
address a;
if (_info->deoptimize_on_exception()) {
// Deoptimize, do not throw the exception, because it is probably wrong to do it here.
a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
} else {
a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
}
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry);
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
// pass the object in a scratch register because all other registers
// must be preserved
if (_obj->is_cpu_register()) {
__ mov(rscratch1, _obj->as_register());
}
__ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, rscratch2);
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
//---------------slow case: call to native-----------------
__ bind(_entry);
  // Figure out where the arguments should go.
  // Ideally this would derive the Method* and signature from the intrinsic ID
  // rather than hard-wiring the arraycopy signature below.
  //
VMRegPair args[5];
BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
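  // The trailing 'true' selects the outgoing (caller-side) calling convention.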
SharedRuntime::java_calling_convention(signature, args, 5, true);
// push parameters
  // (src, src_pos, dst, dst_pos, length)
Register r[5];
r[0] = src()->as_register();
r[1] = src_pos()->as_register();
r[2] = dst()->as_register();
r[3] = dst_pos()->as_register();
r[4] = length()->as_register();
  // Arguments assigned to stack slots are stored; register arguments must already be in place.
for (int i = 0; i < 5 ; i++ ) {
VMReg r_1 = args[i].first();
if (r_1->is_stack()) {
int st_off = r_1->reg2stack() * wordSize;
__ str (r[i], Address(sp, st_off));
} else {
      assert(r[i] == args[i].first()->as_Register(), "wrong register for arg");
}
}
ce->align_call(lir_static_call);
ce->emit_static_call_stub();
if (ce->compilation()->bailed_out()) {
return; // CodeCache is full
}
Address resolve(SharedRuntime::get_resolve_static_call_stub(),
relocInfo::static_call_type);
address call = __ trampoline_call(resolve);
if (call == NULL) {
ce->bailout("trampoline stub overflow");
return;
}
ce->add_call_info_here(info());
#ifndef PRODUCT
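  // Count slow-path arraycopy calls in non-product builds.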
__ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
__ incrementw(Address(rscratch2));
#endif
__ b(_continuation);
}
/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
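  // A null previous value never needs to be enqueued for SATB marking.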
__ cbz(pre_val_reg, _continuation);
ce->store_parameter(pre_val()->as_register(), 0);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
__ b(_continuation);
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register new_val_reg = new_val()->as_register();
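  // Storing a null reference can never create a cross-region pointer, so the
  // post barrier is skipped.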
__ cbz(new_val_reg, _continuation);
ce->store_parameter(addr()->as_pointer_register(), 0);
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
__ b(_continuation);
}
#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////
#undef __