8202714: Create a MacroAssembler::access_load/store_at wrapper for AArch64
Reviewed-by: adinn, smonteith
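The core of the change is a pair of MacroAssembler wrappers that route every oop access through the active GC's BarrierSetAssembler; the AS_RAW decorator selects the base-class implementation and skips the GC-specific barriers. Condensed from the macroAssembler_aarch64.cpp hunk below (load side only; access_store_at is symmetric):

void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
                                    Register dst, Address src,
                                    Register tmp1, Register thread_tmp) {
  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
  if ((decorators & AS_RAW) != 0) {
    // Qualified call: resolves statically to the plain access, no GC barrier.
    bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
  } else {
    // Virtual dispatch to the GC-specific assembler (e.g. G1).
    bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
  }
}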
--- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp Tue May 15 09:33:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp Tue May 15 05:33:26 2018 -0400
@@ -116,7 +116,7 @@
// Do we need to load the previous value?
if (obj != noreg) {
- __ load_heap_oop(pre_val, Address(obj, 0));
+ __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
}

// Is the previous value null?
@@ -294,7 +294,7 @@
false /* expand_call */);
if (val == noreg) {
- __ store_heap_oop_null(Address(r3, 0));
+ BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg);
} else {
// G1 barrier needs uncompressed oop for region cross check.
Register new_val = val;
@@ -302,7 +302,7 @@
new_val = rscratch2;
__ mov(new_val, val);
}
- __ store_heap_oop(Address(r3, 0), val);
+ BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg);
g1_write_barrier_post(masm,
r3 /* store_adr */,
new_val /* new_val */,
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp Tue May 15 09:33:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp Tue May 15 05:33:26 2018 -0400
@@ -35,11 +35,21 @@
bool on_heap = (decorators & IN_HEAP) != 0;
bool on_root = (decorators & IN_ROOT) != 0;
+ bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
switch (type) {
case T_OBJECT:
case T_ARRAY: {
if (on_heap) {
- __ load_heap_oop(dst, src);
+ if (UseCompressedOops) {
+ __ ldrw(dst, src);
+ if (oop_not_null) {
+ __ decode_heap_oop_not_null(dst);
+ } else {
+ __ decode_heap_oop(dst);
+ }
+ } else {
+ __ ldr(dst, src);
+ }
} else {
assert(on_root, "why else?");
__ ldr(dst, src);
@@ -57,8 +67,17 @@
switch (type) {
case T_OBJECT:
case T_ARRAY: {
+ val = val == noreg ? zr : val;
if (on_heap) {
- __ store_heap_oop(dst, val);
+ if (UseCompressedOops) {
+ assert(!dst.uses(val), "not enough registers");
+ if (val != zr) {
+ __ encode_heap_oop(val);
+ }
+ __ strw(val, dst);
+ } else {
+ __ str(val, dst);
+ }
} else {
assert(on_root, "why else?");
__ str(val, dst);
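For reference, one way the new OOP_NOT_NULL plumbing is reached from the public helpers (a sketch; robj and field_offset are illustrative, and a GC without a load barrier is assumed so that the virtual call lands in the base load_at above):

  __ load_heap_oop_not_null(r0, Address(robj, field_offset), rscratch1);
  // -> access_load_at(T_OBJECT, IN_HEAP | OOP_NOT_NULL, r0, src, rscratch1, noreg)
  // -> BarrierSetAssembler::load_at, which with UseCompressedOops emits
  //    ldrw(r0, src) followed by decode_heap_oop_not_null(r0)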
--- a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp Tue May 15 09:33:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp Tue May 15 05:33:26 2018 -0400
@@ -90,13 +90,14 @@
void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {
+ bool in_heap = (decorators & IN_HEAP) != 0;
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;
- if (val == noreg) {
- __ store_heap_oop_null(dst);
- } else {
- __ store_heap_oop(dst, val);
+
+ bool needs_post_barrier = val != noreg && in_heap;
+ BarrierSetAssembler::store_at(masm, decorators, type, dst, val, noreg, noreg);
+ if (needs_post_barrier) {
// flatten object address if needed
if (!precise || (dst.index() == noreg && dst.offset() == 0)) {
store_check(masm, dst.base(), dst);
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Tue May 15 09:33:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Tue May 15 05:33:26 2018 -0400
@@ -278,8 +278,7 @@
resolve_oop_handle(result, tmp);
// Add in the index
add(result, result, index);
- BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->load_at(this, IN_HEAP, T_OBJECT, result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), tmp, /*tmp_thread*/ noreg);
+ load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void InterpreterMacroAssembler::load_resolved_klass_at_offset(
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Tue May 15 09:33:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Tue May 15 05:33:26 2018 -0400
@@ -3975,41 +3975,48 @@
movk(dst, nk & 0xffff);
}

-void MacroAssembler::load_heap_oop(Register dst, Address src)
-{
- if (UseCompressedOops) {
- ldrw(dst, src);
- decode_heap_oop(dst);
+void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
+ Register dst, Address src,
+ Register tmp1, Register thread_tmp) {
+ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bool as_raw = (decorators & AS_RAW) != 0;
+ if (as_raw) {
+ bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
} else {
- ldr(dst, src);
+ bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
}
}

-void MacroAssembler::load_heap_oop_not_null(Register dst, Address src)
-{
- if (UseCompressedOops) {
- ldrw(dst, src);
- decode_heap_oop_not_null(dst);
+void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
+ Address dst, Register src,
+ Register tmp1, Register thread_tmp) {
+ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bool as_raw = (decorators & AS_RAW) != 0;
+ if (as_raw) {
+ bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
} else {
- ldr(dst, src);
+ bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
}
}

-void MacroAssembler::store_heap_oop(Address dst, Register src) {
- if (UseCompressedOops) {
- assert(!dst.uses(src), "not enough registers");
- encode_heap_oop(src);
- strw(src, dst);
- } else
- str(src, dst);
+void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
+ Register thread_tmp, DecoratorSet decorators) {
+ access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
+}
+
+void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
+ Register thread_tmp, DecoratorSet decorators) {
+ access_load_at(T_OBJECT, IN_HEAP | OOP_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
+}
+
+void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
+ Register thread_tmp, DecoratorSet decorators) {
+ access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
}

// Used for storing NULLs.
void MacroAssembler::store_heap_oop_null(Address dst) {
- if (UseCompressedOops) {
- strw(zr, dst);
- } else
- str(zr, dst);
+ access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg);
}

Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
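A note on the null-store path, pieced together from this hunk and the barrierSetAssembler_aarch64.cpp hunk above: store_heap_oop_null no longer special-cases zr itself; it passes noreg down, and whichever GC-specific store_at ends up running, the final raw store goes through BarrierSetAssembler::store_at, which substitutes zr:

  __ store_heap_oop_null(Address(r3, 0));
  // -> access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg)
  // -> bs->store_at(...), eventually BarrierSetAssembler::store_at, which maps
  //    noreg to zr and emits strw(zr, dst) or str(zr, dst) as appropriate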
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Tue May 15 09:33:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Tue May 15 05:33:26 2018 -0400
@@ -789,10 +789,19 @@
void resolve_oop_handle(Register result, Register tmp = r5);
void load_mirror(Register dst, Register method, Register tmp = r5);
- void load_heap_oop(Register dst, Address src);
+ void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
+ Register tmp1, Register tmp_thread);
+
+ void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
+ Register tmp1, Register tmp_thread);
- void load_heap_oop_not_null(Register dst, Address src);
- void store_heap_oop(Address dst, Register src);
+ void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
+ Register thread_tmp = noreg, DecoratorSet decorators = 0);
+
+ void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
+ Register thread_tmp = noreg, DecoratorSet decorators = 0);
+ void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
+ Register tmp_thread = noreg, DecoratorSet decorators = 0);
// currently unimplemented
// Used for storing NULL. All other oop constants should be
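All of the new parameters default, so existing two-argument call sites keep compiling unchanged, while callers with spare registers can hand them to the barrier code as tmp1/thread_tmp and pass extra decorators. Both shapes appear in later hunks of this patch, for example:

  __ load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
  __ load_heap_oop(dst, src, r10, /* thread_tmp */ r1, decorators);
  __ store_heap_oop(dst, val, r10, r1, decorators);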
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Tue May 15 09:33:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Tue May 15 05:33:26 2018 -0400
@@ -135,11 +135,11 @@
// Load the invoker, as MH -> MH.form -> LF.vmentry
__ verify_oop(recv);
- __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
+ __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())), temp2);
__ verify_oop(method_temp);
- __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
+ __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), temp2);
__ verify_oop(method_temp);
- __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
+ __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), temp2);
__ verify_oop(method_temp);
__ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
@@ -311,7 +311,7 @@
if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
Label L_ok;
Register temp2_defc = temp2;
- __ load_heap_oop(temp2_defc, member_clazz);
+ __ load_heap_oop(temp2_defc, member_clazz, temp3);
load_klass_from_Class(_masm, temp2_defc);
__ verify_klass_ptr(temp2_defc);
__ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Tue May 15 09:33:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Tue May 15 05:33:26 2018 -0400
@@ -1816,13 +1816,13 @@
__ align(OptoLoopAlignment);
__ BIND(L_store_element);
- __ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop); // store the oop
+ __ store_heap_oop(__ post(to, UseCompressedOops ? 4 : 8), copied_oop, noreg, noreg, AS_RAW); // store the oop
__ sub(count, count, 1);
__ cbz(count, L_do_card_marks);

// ======== loop entry is here ========
__ BIND(L_load_element);
- __ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8)); // load the oop
+ __ load_heap_oop(copied_oop, __ post(from, UseCompressedOops ? 4 : 8), noreg, noreg, AS_RAW); // load the oop
__ cbz(copied_oop, L_store_element);
__ load_klass(r19_klass, copied_oop);// query the object klass
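The AS_RAW decorator keeps the per-element accesses in this copy loop as plain (compressed-)oop moves; the GC's post-barrier work for the destination range is still handled after the loop (see L_do_card_marks above). Assuming UseCompressedOops, the raw store above expands to roughly the following (a sketch of what the base BarrierSetAssembler emits, not code in the patch):

  __ encode_heap_oop(copied_oop);       // clobbers copied_oop, which is reloaded next iteration
  __ strw(copied_oop, __ post(to, 4));  // 32-bit store with post-increment of 'to'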
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Tue May 15 09:33:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Tue May 15 05:33:26 2018 -0400
@@ -147,16 +147,14 @@
Register val,
DecoratorSet decorators) {
assert(val == noreg || val == r0, "parameter is just for looks");
- BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->store_at(_masm, decorators, T_OBJECT, dst, val, /*tmp1*/ r10, /*tmp2*/ r1);
+ __ store_heap_oop(dst, val, r10, r1, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
Address src,
Register dst,
DecoratorSet decorators) {
- BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->load_at(_masm, decorators, T_OBJECT, dst, src, /*tmp1*/ r10, /*tmp_thread*/ r1);
+ __ load_heap_oop(dst, src, r10, r1, decorators);
}

Address TemplateTable::at_bcp(int offset) {