--- a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp Mon Sep 24 16:44:24 2018 +0300
+++ b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp Mon Sep 24 16:52:12 2018 +0300
@@ -68,6 +68,10 @@
);
virtual void barrier_stubs_init() {}
+
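+  // Hook for barrier sets that must canonicalize an object's address (e.g.
+  // collectors that may forward objects) before it is locked or otherwise
+  // accessed directly.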
+ virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
+ // Default implementation does not need to do anything.
+ }
};
#endif // CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp Mon Sep 24 16:44:24 2018 +0300
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp Mon Sep 24 16:52:12 2018 +0300
@@ -2639,6 +2639,14 @@
}
}
+void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
+  // If the caller did not specify ACCESS_READ or ACCESS_WRITE, conservatively
+  // default to the stronger ACCESS_READ | ACCESS_WRITE.
+ if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
+ decorators |= ACCESS_READ | ACCESS_WRITE;
+ }
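+  // Delegate to the GC-specific barrier set assembler; the default
+  // implementation of resolve() is a no-op.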
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->resolve(this, decorators, obj);
+}
#ifdef AARCH64
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp Mon Sep 24 16:44:24 2018 +0300
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp Mon Sep 24 16:52:12 2018 +0300
@@ -1056,6 +1056,10 @@
void access_load_at(BasicType type, DecoratorSet decorators, Address src, Register dst, Register tmp1, Register tmp2, Register tmp3);
void access_store_at(BasicType type, DecoratorSet decorators, Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null);
+ // Resolves obj for access. Result is placed in the same register.
+ // All other registers are preserved.
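+  // If neither ACCESS_READ nor ACCESS_WRITE is given, both are assumed.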
+ void resolve(DecoratorSet decorators, Register obj);
+
#ifdef AARCH64
void encode_heap_oop(Register dst, Register src);
void encode_heap_oop(Register r) {
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp Mon Sep 24 16:44:24 2018 +0300
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp Mon Sep 24 16:52:12 2018 +0300
@@ -1573,6 +1573,8 @@
// Remember the handle for the unlocking code
__ mov(sync_handle, R1);
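+    // Resolve sync_obj through the barrier set before it is locked, so that
+    // a collector which forwards objects hands back a stable address.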
+ __ resolve(IS_NOT_NULL, sync_obj);
+
if(UseBiasedLocking) {
__ biased_locking_enter(sync_obj, tmp, disp_hdr/*scratched*/, false, Rtemp, lock_done, slow_lock_biased);
}
@@ -1690,6 +1692,8 @@
if (method->is_synchronized()) {
__ ldr(sync_obj, Address(sync_handle));
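+    // Resolve again after the native call: the object was reloaded from the
+    // handle and may need to be re-canonicalized by the barrier set.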
+ __ resolve(IS_NOT_NULL, sync_obj);
+
if(UseBiasedLocking) {
__ biased_locking_exit(sync_obj, Rtemp, unlock_done);
// disp_hdr may not have been saved on entry with biased locking
--- a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp Mon Sep 24 16:44:24 2018 +0300
+++ b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp Mon Sep 24 16:52:12 2018 +0300
@@ -605,6 +605,7 @@
#endif // AARCH64
__ load_mirror(R0, Rmethod, Rtemp);
__ bind(done);
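+    // R0 now holds the object to lock (the receiver, or the class mirror for
+    // static methods); let the barrier set resolve it before it is locked.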
+ __ resolve(IS_NOT_NULL, R0);
}
// add space for monitor & lock
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp Mon Sep 24 16:44:24 2018 +0300
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp Mon Sep 24 16:52:12 2018 +0300
@@ -4885,6 +4885,8 @@
// check for NULL object
__ null_check(Robj, Rtemp);
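+  // Resolve Robj before it is stored into the monitor slot, so the monitor
+  // holds the canonical address of the object.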
+ __ resolve(IS_NOT_NULL, Robj);
+
const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
Label allocate_monitor, allocated;
@@ -5015,6 +5017,8 @@
// check for NULL object
__ null_check(Robj, Rtemp);
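+  // Resolve Robj so that the comparison against the objects stored in the
+  // monitor area below uses the canonical address.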
+ __ resolve(IS_NOT_NULL, Robj);
+
const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
Label found, throw_exception;