--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp Wed Aug 22 07:51:07 2018 -0400
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp Sun Aug 19 20:00:57 2018 +0200
@@ -941,6 +941,10 @@
         index = tmp;
       }
 
+      if (is_updateBytes) {
+        base_op = access_resolve(ACCESS_READ, base_op);
+      }
+
       if (offset) {
         LIR_Opr tmp = new_pointer_register();
         __ add(base_op, LIR_OprFact::intConst(offset), tmp);
@@ -1019,6 +1023,10 @@
         index = tmp;
       }
 
+      if (is_updateBytes) {
+        base_op = access_resolve(ACCESS_READ, base_op);
+      }
+
       if (offset) {
         LIR_Opr tmp = new_pointer_register();
         __ add(base_op, LIR_OprFact::intConst(offset), tmp);
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp Wed Aug 22 07:51:07 2018 -0400
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp Sun Aug 19 20:00:57 2018 +0200
@@ -997,6 +997,10 @@
       }
 #endif
 
+      if (is_updateBytes) {
+        base_op = access_resolve(IS_NOT_NULL | ACCESS_READ, base_op);
+      }
+
       LIR_Address* a = new LIR_Address(base_op,
                                        index,
                                        offset,
@@ -1054,7 +1058,7 @@
     constant_aOffset = result_aOffset->as_jlong();
     result_aOffset = LIR_OprFact::illegalOpr;
   }
-  LIR_Opr result_a = a.result();
+  LIR_Opr result_a = access_resolve(ACCESS_READ, a.result());
 
   long constant_bOffset = 0;
   LIR_Opr result_bOffset = bOffset.result();
@@ -1062,7 +1066,7 @@
     constant_bOffset = result_bOffset->as_jlong();
     result_bOffset = LIR_OprFact::illegalOpr;
   }
-  LIR_Opr result_b = b.result();
+  LIR_Opr result_b = access_resolve(ACCESS_READ, b.result());
 
 #ifndef _LP64
   result_a = new_register(T_INT);
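
[Note, not part of the patch: the two resolves above matter because do_vectorizedMismatch goes on to build raw addresses from the operands and hands them to a stub. A paraphrased sketch of that downstream use, based on the surrounding function; treat the exact shape as illustrative:]

    // Paraphrased from the code that follows these hunks: the resolved base
    // and the folded constant offset form a raw LIR_Address. Resolving first
    // lets a barrier set substitute a different (e.g. to-space) copy of the
    // array before any address arithmetic is committed.
    LIR_Address* addr_a = new LIR_Address(result_a, result_aOffset,
                                          constant_aOffset, T_BYTE);
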
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Wed Aug 22 07:51:07 2018 -0400
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Sun Aug 19 20:00:57 2018 +0200
@@ -1690,6 +1690,15 @@
   }
 }
 
+LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) {
+  // Use stronger ACCESS_WRITE|ACCESS_READ by default.
+  if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
+    decorators |= ACCESS_READ | ACCESS_WRITE;
+  }
+
+  return _barrier_set->resolve(this, decorators, obj);
+}
+
 void LIRGenerator::do_LoadField(LoadField* x) {
   bool needs_patching = x->needs_patching();
   bool is_volatile = x->field()->is_volatile();
@@ -1767,11 +1776,12 @@
   if (GenerateRangeChecks) {
     CodeEmitInfo* info = state_for(x);
     CodeStub* stub = new RangeCheckStub(info, index.result());
+    LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
     if (index.result()->is_constant()) {
-      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
+      cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
       __ branch(lir_cond_belowEqual, T_INT, stub);
     } else {
-      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
+      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj,
                   java_nio_Buffer::limit_offset(), T_INT, info);
       __ branch(lir_cond_aboveEqual, T_INT, stub);
     }
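
[Note, not part of the patch: access_resolve() defaults conservatively. A caller that passes neither ACCESS_READ nor ACCESS_WRITE gets both, so a barrier set may assume at least one of the two is set. A minimal sketch of the resulting call patterns; 'receiver' is a hypothetical LIR_Opr, not a name from this patch:]

    LIR_Opr rw = access_resolve(0, receiver);            // widened to READ|WRITE
    LIR_Opr ro = access_resolve(ACCESS_READ, receiver);  // read-only resolve
    LIR_Opr wo = access_resolve(ACCESS_WRITE, receiver); // write resolve
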
--- a/src/hotspot/share/c1/c1_LIRGenerator.hpp Wed Aug 22 07:51:07 2018 -0400
+++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp Sun Aug 19 20:00:57 2018 +0200
@@ -300,6 +300,8 @@
   LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
                                LIRItem& base, LIRItem& offset, LIRItem& value);
 
+  LIR_Opr access_resolve(DecoratorSet decorators, LIR_Opr obj);
+
   // These need to guarantee JMM volatile semantics are preserved on each platform
   // and requires one implementation per architecture.
   LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
--- a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp Wed Aug 22 07:51:07 2018 -0400
+++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp Sun Aug 19 20:00:57 2018 +0200
@@ -334,3 +334,7 @@
     }
   }
 }
+
+LIR_Opr BarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) {
+  return obj;
+}
--- a/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp Wed Aug 22 07:51:07 2018 -0400
+++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp Sun Aug 19 20:00:57 2018 +0200
@@ -134,6 +134,8 @@
   virtual LIR_Opr atomic_xchg_at(LIRAccess& access, LIRItem& value);
   virtual LIR_Opr atomic_add_at(LIRAccess& access, LIRItem& value);
 
+  virtual LIR_Opr resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj);
+
   virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob) {}
 };
 