8225642: ZGC: Crash due to bad oops being spilled to stack in load barriers
Reviewed-by: neliasso, pliden
Contributed-by: erik.osterlund@oracle.com, stuart.monteith@linaro.org
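
Summary: C2 modeled the ZGC load barrier slow path as a memory load
(LoadBarrierSlowRegNode extended LoadPNode and the matcher rules consumed
only the address operand), so the register allocator was free to treat the
not-yet-healed oop like any ordinary load result and spill it to the stack,
which is the "bad oops being spilled to stack" crash in the title. The patch
re-expresses the node as a plain TypeNode whose data inputs are the field
address and the already-loaded oop, and ties that oop to the result register
in the matcher rules via match(Set dst (LoadBarrierSlowReg src dst)).

The following is a standalone, compilable sketch of the new node shape only.
Node, TypeNode, TypePtr and main() are minimal toy stand-ins written for
illustration; the real declarations live under src/hotspot/share/opto/ and
src/hotspot/share/gc/z/c2/ and differ in detail.

#include <cstring>

struct TypePtr {};

// Toy stand-in: a node with a small fixed input array.
struct Node {
  Node* _in[3];
  explicit Node(unsigned req) { (void)req; std::memset(_in, 0, sizeof(_in)); }
  void init_req(unsigned i, Node* n) { _in[i] = n; }
};

// Toy stand-in: a node carrying a type, as in opto/node.hpp.
struct TypeNode : public Node {
  const TypePtr* _type;
  TypeNode(const TypePtr* t, unsigned req) : Node(req), _type(t) {}
};

// Mirrors the fixed LoadBarrierSlowRegNode from zBarrierSetC2.hpp below:
// no longer a LoadPNode, so the slow path is not a load the allocator may
// freely spill; the unhealed oop is instead an explicit input.
class LoadBarrierSlowRegNode : public TypeNode {
private:
  bool _is_weak;
public:
  LoadBarrierSlowRegNode(Node* c, Node* adr, Node* src,
                         const TypePtr* t, bool weak)
    : TypeNode(t, 3), _is_weak(weak) {
    (void)c;           // control input; wiring is not shown in this toy
    init_req(1, adr);  // address of the loaded field
    init_req(2, src);  // the unhealed oop; the matcher ties it to dst
  }
  bool is_weak() const { return _is_weak; }
};

int main() {
  TypePtr t;
  Node ctrl(1), adr(1), val(1);
  LoadBarrierSlowRegNode barrier(&ctrl, &adr, &val, &t, /*weak=*/false);
  return barrier.is_weak() ? 1 : 0;
}
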
--- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad Tue Jul 02 11:33:01 2019 +0200
+++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad Tue Jul 02 11:33:05 2019 +0200
@@ -61,7 +61,7 @@
//
// Execute ZGC load barrier (strong) slow path
//
-instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr,
+instruct loadBarrierSlowReg(iRegP dst, memory src, rFlagsReg cr,
vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
@@ -69,20 +69,22 @@
vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
vRegD_V30 v30, vRegD_V31 v31) %{
- match(Set dst (LoadBarrierSlowReg mem));
+ match(Set dst (LoadBarrierSlowReg src dst));
predicate(!n->as_LoadBarrierSlowReg()->is_weak());
- effect(DEF dst, KILL cr,
+ effect(KILL cr,
KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
KILL v29, KILL v30, KILL v31);
- format %{"LoadBarrierSlowReg $dst, $mem" %}
+ format %{ "lea $dst, $src\n\t"
+ "call #ZLoadBarrierSlowPath" %}
+
ins_encode %{
- z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
- $mem$$index, $mem$$scale, $mem$$disp, false);
+ z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
+ $src$$index, $src$$scale, $src$$disp, false);
%}
ins_pipe(pipe_slow);
%}
@@ -90,7 +92,7 @@
//
// Execute ZGC load barrier (weak) slow path
//
-instruct loadBarrierWeakSlowReg(iRegP dst, memory mem, rFlagsReg cr,
+instruct loadBarrierWeakSlowReg(iRegP dst, memory src, rFlagsReg cr,
vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
@@ -98,20 +100,22 @@
vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
vRegD_V30 v30, vRegD_V31 v31) %{
- match(Set dst (LoadBarrierSlowReg mem));
+ match(Set dst (LoadBarrierSlowReg src dst));
predicate(n->as_LoadBarrierSlowReg()->is_weak());
- effect(DEF dst, KILL cr,
+ effect(KILL cr,
KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
KILL v29, KILL v30, KILL v31);
- format %{"LoadBarrierWeakSlowReg $dst, $mem" %}
+ format %{ "lea $dst, $src\n\t"
+ "call #ZLoadBarrierSlowPath" %}
+
ins_encode %{
- z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
- $mem$$index, $mem$$scale, $mem$$disp, true);
+ z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
+ $src$$index, $src$$scale, $src$$disp, true);
%}
ins_pipe(pipe_slow);
%}
--- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad Tue Jul 02 11:33:01 2019 +0200
+++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad Tue Jul 02 11:33:05 2019 +0200
@@ -45,32 +45,31 @@
// For XMM and YMM enabled processors
instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
- rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+ rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
- match(Set dst (LoadBarrierSlowReg src));
- predicate((UseAVX <= 2) && !n->as_LoadBarrierSlowReg()->is_weak());
-
+ match(Set dst (LoadBarrierSlowReg src dst));
+ predicate(UseAVX <= 2 && !n->as_LoadBarrierSlowReg()->is_weak());
- effect(DEF dst, KILL cr,
+ effect(KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15);
- format %{ "zLoadBarrierSlowRegXmmAndYmm $dst, $src" %}
+ format %{ "lea $dst, $src\n\t"
+ "call #ZLoadBarrierSlowPath" %}
ins_encode %{
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
%}
-
ins_pipe(pipe_slow);
%}
// For ZMM enabled processors
instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
- rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+ rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
@@ -79,10 +78,10 @@
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
- match(Set dst (LoadBarrierSlowReg src));
- predicate((UseAVX == 3) && !n->as_LoadBarrierSlowReg()->is_weak());
+ match(Set dst (LoadBarrierSlowReg src dst));
+ predicate(UseAVX == 3 && !n->as_LoadBarrierSlowReg()->is_weak());
- effect(DEF dst, KILL cr,
+ effect(KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
@@ -92,43 +91,42 @@
KILL x24, KILL x25, KILL x26, KILL x27,
KILL x28, KILL x29, KILL x30, KILL x31);
- format %{ "zLoadBarrierSlowRegZmm $dst, $src" %}
+ format %{ "lea $dst, $src\n\t"
+ "call #ZLoadBarrierSlowPath" %}
ins_encode %{
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
%}
-
ins_pipe(pipe_slow);
%}
// For XMM and YMM enabled processors
instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
- rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+ rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
- match(Set dst (LoadBarrierSlowReg src));
- predicate((UseAVX <= 2) && n->as_LoadBarrierSlowReg()->is_weak());
-
+ match(Set dst (LoadBarrierSlowReg src dst));
+ predicate(UseAVX <= 2 && n->as_LoadBarrierSlowReg()->is_weak());
- effect(DEF dst, KILL cr,
+ effect(KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
KILL x12, KILL x13, KILL x14, KILL x15);
- format %{ "zLoadBarrierWeakSlowRegXmmAndYmm $dst, $src" %}
+ format %{ "lea $dst, $src\n\t"
+ "call #ZLoadBarrierSlowPath" %}
ins_encode %{
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
%}
-
ins_pipe(pipe_slow);
%}
// For ZMM enabled processors
instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
- rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+ rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
@@ -137,10 +135,10 @@
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
- match(Set dst (LoadBarrierSlowReg src));
- predicate((UseAVX == 3) && n->as_LoadBarrierSlowReg()->is_weak());
+ match(Set dst (LoadBarrierSlowReg src dst));
+ predicate(UseAVX == 3 && n->as_LoadBarrierSlowReg()->is_weak());
- effect(DEF dst, KILL cr,
+ effect(KILL cr,
KILL x0, KILL x1, KILL x2, KILL x3,
KILL x4, KILL x5, KILL x6, KILL x7,
KILL x8, KILL x9, KILL x10, KILL x11,
@@ -150,12 +148,12 @@
KILL x24, KILL x25, KILL x26, KILL x27,
KILL x28, KILL x29, KILL x30, KILL x31);
- format %{ "zLoadBarrierWeakSlowRegZmm $dst, $src" %}
+ format %{ "lea $dst, $src\n\t"
+ "call #ZLoadBarrierSlowPath" %}
ins_encode %{
z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
%}
-
ins_pipe(pipe_slow);
%}
--- a/src/hotspot/share/adlc/formssel.cpp Tue Jul 02 11:33:01 2019 +0200
+++ b/src/hotspot/share/adlc/formssel.cpp Tue Jul 02 11:33:05 2019 +0200
@@ -3513,7 +3513,7 @@
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
#if INCLUDE_ZGC
- "LoadBarrierSlowReg", "ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
+ "ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
#endif
"ClearArray"
};
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp Tue Jul 02 11:33:01 2019 +0200
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp Tue Jul 02 11:33:05 2019 +0200
@@ -540,8 +540,8 @@
Node* then = igvn.transform(new IfTrueNode(iff));
Node* elsen = igvn.transform(new IfFalseNode(iff));
- Node* new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_mem, in_adr, in_val->adr_type(),
- (const TypePtr*) in_val->bottom_type(), MemNode::unordered, barrier->is_weak()));
+ Node* new_loadp = igvn.transform(new LoadBarrierSlowRegNode(then, in_adr, in_val,
+ (const TypePtr*) in_val->bottom_type(), barrier->is_weak()));
// Create the final region/phi pair to converge cntl/data paths to downstream code
Node* result_region = igvn.transform(new RegionNode(3));
@@ -667,7 +667,6 @@
case Op_ZCompareAndExchangeP:
case Op_ZCompareAndSwapP:
case Op_ZWeakCompareAndSwapP:
- case Op_LoadBarrierSlowReg:
#ifdef ASSERT
if (VerifyOptoOopOffsets) {
MemNode *mem = n->as_Mem();
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp Tue Jul 02 11:33:01 2019 +0200
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp Tue Jul 02 11:33:05 2019 +0200
@@ -104,22 +104,25 @@
}
};
-class LoadBarrierSlowRegNode : public LoadPNode {
+class LoadBarrierSlowRegNode : public TypeNode {
private:
- bool _is_weak;
+ bool _is_weak;
public:
LoadBarrierSlowRegNode(Node *c,
- Node *mem,
Node *adr,
- const TypePtr *at,
+ Node *src,
const TypePtr* t,
- MemOrd mo,
- bool weak = false,
- ControlDependency control_dependency = DependsOnlyOnTest) :
- LoadPNode(c, mem, adr, at, t, mo, control_dependency), _is_weak(weak) {
+ bool weak) :
+ TypeNode(t, 3), _is_weak(weak) {
+ init_req(1, adr);
+ init_req(2, src);
init_class_id(Class_LoadBarrierSlowReg);
}
+ virtual uint size_of() const {
+ return sizeof(*this);
+ }
+
virtual const char * name() {
return "LoadBarrierSlowRegNode";
}
--- a/src/hotspot/share/opto/lcm.cpp Tue Jul 02 11:33:01 2019 +0200
+++ b/src/hotspot/share/opto/lcm.cpp Tue Jul 02 11:33:05 2019 +0200
@@ -170,7 +170,6 @@
case Op_LoadI:
case Op_LoadL:
case Op_LoadP:
- case Op_LoadBarrierSlowReg:
case Op_LoadN:
case Op_LoadS:
case Op_LoadKlass:
--- a/src/hotspot/share/opto/loopnode.cpp Tue Jul 02 11:33:01 2019 +0200
+++ b/src/hotspot/share/opto/loopnode.cpp Tue Jul 02 11:33:05 2019 +0200
@@ -4294,7 +4294,6 @@
case Op_LoadL:
case Op_LoadS:
case Op_LoadP:
- case Op_LoadBarrierSlowReg:
case Op_LoadN:
case Op_LoadRange:
case Op_LoadD_unaligned:
--- a/src/hotspot/share/opto/node.hpp Tue Jul 02 11:33:01 2019 +0200
+++ b/src/hotspot/share/opto/node.hpp Tue Jul 02 11:33:05 2019 +0200
@@ -675,6 +675,7 @@
DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
+ DEFINE_CLASS_ID(LoadBarrierSlowReg, Type, 7)
DEFINE_CLASS_ID(Proj, Node, 3)
DEFINE_CLASS_ID(CatchProj, Proj, 0)
@@ -688,7 +689,6 @@
DEFINE_CLASS_ID(Mem, Node, 4)
DEFINE_CLASS_ID(Load, Mem, 0)
DEFINE_CLASS_ID(LoadVector, Load, 0)
- DEFINE_CLASS_ID(LoadBarrierSlowReg, Load, 1)
DEFINE_CLASS_ID(Store, Mem, 1)
DEFINE_CLASS_ID(StoreVector, Store, 0)
DEFINE_CLASS_ID(LoadStore, Mem, 2)
--- a/src/hotspot/share/opto/vectornode.cpp Tue Jul 02 11:33:01 2019 +0200
+++ b/src/hotspot/share/opto/vectornode.cpp Tue Jul 02 11:33:05 2019 +0200
@@ -297,7 +297,6 @@
case Op_LoadI: case Op_LoadL:
case Op_LoadF: case Op_LoadD:
case Op_LoadP: case Op_LoadN:
- case Op_LoadBarrierSlowReg:
*start = 0;
*end = 0; // no vector operands
break;