--- a/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp Fri Nov 25 17:56:30 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/assembler_aarch64.hpp Tue Nov 22 08:46:49 2016 -0800
@@ -848,7 +848,7 @@
// architecture. In debug mode we shrink it in order to test
// trampolines, but not so small that branches in the interpreter
// are out of range.
- static const unsigned long branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
+ static const unsigned long branch_range = INCLUDE_JVMCI ? 128 * M : NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
static bool reachable_from_branch_at(address branch, address target) {
return uabs(target - branch) < branch_range;
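For context: the 128M figure matches the reach of AArch64 B/BL, whose signed
26-bit word offset spans plus or minus 128 MB. A self-contained sketch of the
check above; names mirror the HotSpot code, the constants are assumptions for
illustration, not the real header:

    #include <cstdint>
    #include <cstdlib>

    static const uint64_t M = 1024 * 1024;
    // B/BL carry a signed 26-bit word offset: 2^25 * 4 bytes = 128 MB each way.
    static const uint64_t branch_range = 128 * M;

    static bool reachable_from_branch_at(const char* branch, const char* target) {
      return (uint64_t)llabs((long long)(target - branch)) < branch_range;
    }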
--- a/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp Fri Nov 25 17:56:30 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp Tue Nov 22 08:46:49 2016 -0800
@@ -41,28 +41,34 @@
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
+#ifdef ASSERT
+ {
+ NativeInstruction *insn = nativeInstruction_at(pc);
+ if (HotSpotObjectConstantImpl::compressed(constant)) {
+ // Move narrow constant: movz (upper 16 bits, LSL #16), movk (lower 16 bits).
+ assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
+ nativeInstruction_at(pc+4)->is_movk(), "wrong insn in patch");
+ } else {
+ // Move wide constant: movz n, movk, movk.
+ assert(nativeInstruction_at(pc+4)->is_movk()
+ && nativeInstruction_at(pc+8)->is_movk(), "wrong insn in patch");
+ }
+ }
+#endif // ASSERT
Handle obj = HotSpotObjectConstantImpl::object(constant);
jobject value = JNIHandles::make_local(obj());
- if (HotSpotObjectConstantImpl::compressed(constant)) {
- int oop_index = _oop_recorder->find_index(value);
- RelocationHolder rspec = oop_Relocation::spec(oop_index);
- _instructions->relocate(pc, rspec, 1);
- Unimplemented();
- } else {
- NativeMovConstReg* move = nativeMovConstReg_at(pc);
- move->set_data((intptr_t) value);
- int oop_index = _oop_recorder->find_index(value);
- RelocationHolder rspec = oop_Relocation::spec(oop_index);
- _instructions->relocate(pc, rspec);
- }
+ MacroAssembler::patch_oop(pc, (address)obj());
+ int oop_index = _oop_recorder->find_index(value);
+ RelocationHolder rspec = oop_Relocation::spec(oop_index);
+ _instructions->relocate(pc, rspec);
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
+ MacroAssembler::patch_narrow_klass(pc, narrowOop);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
- Unimplemented();
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);
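For reference, the instruction shapes that the new asserts in
pd_patch_OopConstant and patch_narrow_klass below expect, as a hedged sketch;
the register choice is illustrative, not taken from the patch:

    // compressed (narrow) constant n, 32 bits:
    //   movz reg, #(n >> 16), lsl #16            // upper half
    //   movk reg, #(n & 0xffff)                  // lower half
    // wide constant a, 48-bit address:
    //   movz reg, #(a & 0xffff)
    //   movk reg, #((a >> 16) & 0xffff), lsl #16
    //   movk reg, #((a >> 32) & 0xffff), lsl #32

    #include <cstdint>

    // Reassemble the value a wide sequence loads (assumed helper, for
    // illustration only).
    static uint64_t decode_wide_constant(uint16_t imm0, uint16_t imm1, uint16_t imm2) {
      return (uint64_t)imm0 | ((uint64_t)imm1 << 16) | ((uint64_t)imm2 << 32);
    }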
@@ -167,8 +173,8 @@
if (jvmci_reg < RegisterImpl::number_of_registers) {
return as_Register(jvmci_reg)->as_VMReg();
} else {
- jint floatRegisterNumber = jvmci_reg - RegisterImpl::number_of_registers;
- if (floatRegisterNumber < FloatRegisterImpl::number_of_registers) {
+ jint floatRegisterNumber = jvmci_reg - RegisterImpl::number_of_registers_for_jvmci;
+ if (floatRegisterNumber >= 0 && floatRegisterNumber < FloatRegisterImpl::number_of_registers) {
return as_FloatRegister(floatRegisterNumber)->as_VMReg();
}
JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg);
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Fri Nov 25 17:56:30 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Tue Nov 22 08:46:49 2016 -0800
@@ -185,6 +185,19 @@
return instructions * NativeInstruction::instruction_size;
}
+int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
+ // Metadata pointers are either narrow (32 bits) or wide (48 bits).
+ // We encode a narrow one with its upper 16 bits in the first
+ // instruction (a movz with LSL #16) and its lower 16 bits in the
+ // following movk.
+ NativeInstruction *insn = nativeInstruction_at(insn_addr);
+ assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
+ nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
+
+ Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
+ Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
+ return 2 * NativeInstruction::instruction_size;
+}
+
address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
long offset = 0;
if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
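A hedged sketch of the bitfield write that Instruction_aarch64::patch(addr,
20, 5, val) performs in patch_narrow_klass above: movz and movk keep their
16-bit immediate in bits 20..5 of the instruction word, so the movz (which
shifts left by 16) receives n >> 16 and the movk receives n & 0xffff:

    #include <cstdint>

    // Illustrative reimplementation, not HotSpot's own helper.
    static void patch_imm16(uint32_t* insn, uint32_t imm16) {
      const uint32_t mask = 0xffffu << 5;                  // bits 20..5
      *insn = (*insn & ~mask) | ((imm16 & 0xffffu) << 5);
    }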
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Fri Nov 25 17:56:30 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp Tue Nov 22 08:46:49 2016 -0800
@@ -590,6 +590,7 @@
#endif
static int patch_oop(address insn_addr, address o);
+ static int patch_narrow_klass(address insn_addr, narrowKlass n);
address emit_trampoline_stub(int insts_call_instruction_offset, address target);
--- a/hotspot/src/cpu/aarch64/vm/register_aarch64.hpp Fri Nov 25 17:56:30 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/register_aarch64.hpp Tue Nov 22 08:46:49 2016 -0800
@@ -42,8 +42,9 @@
class RegisterImpl: public AbstractRegisterImpl {
public:
enum {
- number_of_registers = 32,
- number_of_byte_registers = 32
+ number_of_registers = 32,
+ number_of_byte_registers = 32,
+ number_of_registers_for_jvmci = 34 // Including SP and ZR.
};
// derived registers, offsets, and addresses
@@ -103,6 +104,10 @@
CONSTANT_REGISTER_DECLARATION(Register, r29, (29));
CONSTANT_REGISTER_DECLARATION(Register, r30, (30));
+
+// r31 is not a general-purpose register; it represents either the
+// stack pointer or the zero/discard register, depending on the
+// instruction.
CONSTANT_REGISTER_DECLARATION(Register, r31_sp, (31));
CONSTANT_REGISTER_DECLARATION(Register, zr, (32));
CONSTANT_REGISTER_DECLARATION(Register, sp, (33));
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Fri Nov 25 17:56:30 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Tue Nov 22 08:46:49 2016 -0800
@@ -2388,6 +2388,7 @@
__ movw(rcpool, (int32_t)Deoptimization::Unpack_reexecute);
__ mov(c_rarg0, rthread);
+ __ movw(c_rarg2, rcpool); // exec mode
__ lea(rscratch1,
RuntimeAddress(CAST_FROM_FN_PTR(address,
Deoptimization::uncommon_trap)));
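The added movw passes the unpack mode, already loaded into rcpool above, as
the third C argument. This assumes the JVMCI-era entry point takes an
explicit exec mode, along these lines (see deoptimization.hpp; the parameter
names here are recollection, not part of this patch):

    // UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread,
    //                                            jint trap_request,
    //                                            jint exec_mode);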
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java Fri Nov 25 17:56:30 2016 +0000
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java Tue Nov 22 08:46:49 2016 -0800
@@ -84,6 +84,10 @@
public static final Register lr = r30;
+ // Scratch registers used by HotSpot runtime code; they must not be
+ // allocated by the compiler.
+ public static final Register rscratch1 = r8;
+ public static final Register rscratch2 = r9;
+
// @formatter:off
public static final RegisterArray cpuRegisters = new RegisterArray(
r0, r1, r2, r3, r4, r5, r6, r7,
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotRegisterConfig.java Fri Nov 25 17:56:30 2016 +0000
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotRegisterConfig.java Tue Nov 22 08:46:49 2016 -0800
@@ -25,18 +25,19 @@
import static jdk.vm.ci.aarch64.AArch64.lr;
import static jdk.vm.ci.aarch64.AArch64.r0;
import static jdk.vm.ci.aarch64.AArch64.r1;
-import static jdk.vm.ci.aarch64.AArch64.r12;
import static jdk.vm.ci.aarch64.AArch64.r2;
-import static jdk.vm.ci.aarch64.AArch64.r27;
-import static jdk.vm.ci.aarch64.AArch64.r28;
-import static jdk.vm.ci.aarch64.AArch64.r29;
import static jdk.vm.ci.aarch64.AArch64.r3;
-import static jdk.vm.ci.aarch64.AArch64.r31;
import static jdk.vm.ci.aarch64.AArch64.r4;
import static jdk.vm.ci.aarch64.AArch64.r5;
import static jdk.vm.ci.aarch64.AArch64.r6;
import static jdk.vm.ci.aarch64.AArch64.r7;
-import static jdk.vm.ci.aarch64.AArch64.r9;
+import static jdk.vm.ci.aarch64.AArch64.rscratch1;
+import static jdk.vm.ci.aarch64.AArch64.rscratch2;
+import static jdk.vm.ci.aarch64.AArch64.r12;
+import static jdk.vm.ci.aarch64.AArch64.r27;
+import static jdk.vm.ci.aarch64.AArch64.r28;
+import static jdk.vm.ci.aarch64.AArch64.r29;
+import static jdk.vm.ci.aarch64.AArch64.r31;
import static jdk.vm.ci.aarch64.AArch64.sp;
import static jdk.vm.ci.aarch64.AArch64.v0;
import static jdk.vm.ci.aarch64.AArch64.v1;
@@ -114,7 +115,7 @@
private final RegisterArray nativeGeneralParameterRegisters = new RegisterArray(r0, r1, r2, r3, r4, r5, r6, r7);
private final RegisterArray simdParameterRegisters = new RegisterArray(v0, v1, v2, v3, v4, v5, v6, v7);
- public static final Register inlineCacheRegister = r9;
+ public static final Register inlineCacheRegister = rscratch2;
/**
* Vtable stubs expect the metaspace Method in r12.
@@ -125,7 +126,8 @@
public static final Register threadRegister = r28;
public static final Register fp = r29;
- private static final RegisterArray reservedRegisters = new RegisterArray(threadRegister, fp, lr, r31, zr, sp);
+ private static final RegisterArray reservedRegisters
+ = new RegisterArray(rscratch1, rscratch2, threadRegister, fp, lr, r31, zr, sp);
private static RegisterArray initAllocatable(Architecture arch, boolean reserveForHeapBase) {
RegisterArray allRegisters = arch.getAvailableValueRegisters();
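Reserving rscratch1/rscratch2 mirrors the C++ side: HotSpot's AArch64 macro
assembler freely clobbers r8 and r9 when it synthesizes far calls and
materializes addresses, so JVMCI-compiled code must not keep values live in
them. An assumed illustration in the style of macroAssembler_aarch64:

    // A runtime call sequence may silently use rscratch1 for the target:
    //   __ lea(rscratch1, RuntimeAddress(entry));
    //   __ blr(rscratch1);
    // A register allocator that handed out r8 across such a call would
    // see the value clobbered.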