8203157: Object equals abstraction for BarrierSetAssembler
Reviewed-by: eosterlund, aph
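
The new obj_equals() hook lets a collector whose oops need extra work before
they can be compared by address supply that work in one place, instead of
every cmp site being special-cased. As a purely illustrative sketch (the
class name and the resolve_for_compare() helper are hypothetical and not part
of this changeset), an override mirroring the AArch64 hook might look like:

    #include "asm/macroAssembler.hpp"
    #include "gc/shared/barrierSetAssembler.hpp"

    // Illustrative only: a GC-specific assembler that fixes up both operands
    // before falling back to an ordinary register compare.
    class MyGCBarrierSetAssembler : public BarrierSetAssembler {
      // Hypothetical per-oop fix-up; its body depends entirely on the collector.
      void resolve_for_compare(MacroAssembler* masm, Register obj);

    public:
      virtual void obj_equals(MacroAssembler* masm,
                              Register obj1, Register obj2) {
        resolve_for_compare(masm, obj1);
        resolve_for_compare(masm, obj2);
        masm->cmp(obj1, obj2);  // plain compare once both sides are canonical
      }
    };

Every oop comparison that now funnels through MacroAssembler::cmpoop() (C1,
the template interpreter, arrays_equals(), the method handle receiver check)
then picks up the collector-specific behaviour automatically.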
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Wed Jun 13 10:01:53 2018 +0200
@@ -1880,7 +1880,7 @@
// cpu register - cpu register
Register reg2 = opr2->as_register();
if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
- __ cmp(reg1, reg2);
+ __ cmpoop(reg1, reg2);
} else {
assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
__ cmpw(reg1, reg2);
@@ -1911,8 +1911,9 @@
break;
case T_OBJECT:
case T_ARRAY:
- imm = jlong(opr2->as_constant_ptr()->as_jobject());
- break;
+ jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
+ __ cmpoop(reg1, rscratch1);
+ return;
default:
ShouldNotReachHere();
imm = 0; // unreachable
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp Wed Jun 13 10:01:53 2018 +0200
@@ -109,6 +109,11 @@
}
}

+void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
+ Register obj1, Register obj2) {
+ __ cmp(obj1, obj2);
+}
+
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath) {
// If mask changes we need to ensure that the inverse is still encodable as an immediate
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp Wed Jun 13 10:01:53 2018 +0200
@@ -40,6 +40,9 @@
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);

+ virtual void obj_equals(MacroAssembler* masm,
+ Register obj1, Register obj2);
+
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);

--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Wed Jun 13 10:01:53 2018 +0200
@@ -3651,6 +3651,11 @@
cmp(src1, rscratch1);
}

+void MacroAssembler::cmpoop(Register obj1, Register obj2) {
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->obj_equals(this, obj1, obj2);
+}
+
void MacroAssembler::load_klass(Register dst, Register src) {
if (UseCompressedClassPointers) {
ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
@@ -5048,6 +5053,8 @@
// a1 & a2 == 0 means (some-pointer is null) or
// (very-rare-or-even-probably-impossible-pointer-values)
// so, we can save one branch in most cases
+ cmpoop(a1, a2);
+ br(EQ, SAME);
eor(rscratch1, a1, a2);
tst(a1, a2);
mov(result, false);
@@ -5131,7 +5138,7 @@
// faster to perform another branch before comparing a1 and a2
cmp(cnt1, elem_per_word);
br(LE, SHORT); // short or same
- cmp(a1, a2);
+ cmpoop(a1, a2);
br(EQ, SAME);
ldr(tmp3, Address(pre(a1, base_offset)));
cmp(cnt1, stubBytesThreshold);
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Wed Jun 13 10:01:53 2018 +0200
@@ -979,6 +979,8 @@
void addptr(const Address &dst, int32_t src);
void cmpptr(Register src1, Address src2);
+ void cmpoop(Register obj1, Register obj2);
+
// Various forms of CAS
void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Wed Jun 13 10:01:53 2018 +0200
@@ -152,7 +152,7 @@
// assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
Label L;
__ ldr(rscratch1, __ argument_address(temp2, -1));
- __ cmp(recv, rscratch1);
+ __ cmpoop(recv, rscratch1);
__ br(Assembler::EQ, L);
__ ldr(r0, __ argument_address(temp2, -1));
__ hlt(0);
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Wed Jun 13 10:01:53 2018 +0200
@@ -2027,7 +2027,7 @@
// assume branch is more often taken than not (loops use backward branches)
Label not_taken;
__ pop_ptr(r1);
- __ cmp(r1, r0);
+ __ cmpoop(r1, r0);
__ br(j_not(cc), not_taken);
branch(false, false);
__ bind(not_taken);
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp Wed Jun 13 10:01:53 2018 +0200
@@ -187,6 +187,27 @@
}
}

+#ifndef _LP64
+void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
+ Address obj1, jobject obj2) {
+ __ cmpoop_raw(obj1, obj2);
+}
+
+void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
+ Register obj1, jobject obj2) {
+ __ cmpoop_raw(obj1, obj2);
+}
+#endif
+void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
+ Register obj1, Address obj2) {
+ __ cmpptr(obj1, obj2);
+}
+
+void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
+ Register obj1, Register obj2) {
+ __ cmpptr(obj1, obj2);
+}
+
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath) {
__ clear_jweak_tag(obj);
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp Wed Jun 13 10:01:53 2018 +0200
@@ -44,6 +44,18 @@
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);

+#ifndef _LP64
+ virtual void obj_equals(MacroAssembler* masm,
+ Address obj1, jobject obj2);
+ virtual void obj_equals(MacroAssembler* masm,
+ Register obj1, jobject obj2);
+#endif
+
+ virtual void obj_equals(MacroAssembler* masm,
+ Register obj1, Register obj2);
+ virtual void obj_equals(MacroAssembler* masm,
+ Register obj1, Address obj2);
+
// Support for jniFastGetField to try resolving a jobject/jweak in native
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Wed Jun 13 10:01:53 2018 +0200
@@ -118,12 +118,22 @@
cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

+void MacroAssembler::cmpoop_raw(Address src1, jobject obj) {
+ cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::cmpoop_raw(Register src1, jobject obj) {
+ cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+}
+
void MacroAssembler::cmpoop(Address src1, jobject obj) {
- cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->obj_equals(this, src1, obj);
}

void MacroAssembler::cmpoop(Register src1, jobject obj) {
- cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->obj_equals(this, src1, obj);
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
@@ -2785,17 +2795,20 @@
}

void MacroAssembler::cmpoop(Register src1, Register src2) {
- cmpptr(src1, src2);
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->obj_equals(this, src1, src2);
}

void MacroAssembler::cmpoop(Register src1, Address src2) {
- cmpptr(src1, src2);
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->obj_equals(this, src1, src2);
}

#ifdef _LP64
void MacroAssembler::cmpoop(Register src1, jobject src2) {
movoop(rscratch1, src2);
- cmpptr(src1, rscratch1);
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->obj_equals(this, src1, rscratch1);
}
#endif

--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp Wed Jun 13 06:35:04 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp Wed Jun 13 10:01:53 2018 +0200
@@ -741,11 +741,13 @@
void cmpklass(Address dst, Metadata* obj);
void cmpklass(Register dst, Metadata* obj);
void cmpoop(Address dst, jobject obj);
+ void cmpoop_raw(Address dst, jobject obj);
#endif // _LP64

void cmpoop(Register src1, Register src2);
void cmpoop(Register src1, Address src2);
void cmpoop(Register dst, jobject obj);
+ void cmpoop_raw(Register dst, jobject obj);

// NOTE src2 must be the lval. This is NOT an mem-mem compare
void cmpptr(Address src1, AddressLiteral src2);